1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
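/* Common defaults shared by all of the iris_to_brw_*_key conversions below.
 * The texture swizzle value 0x688 is SWIZZLE_XYZW, i.e. the identity swizzle.
 */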
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70       /* Don't tell the backend about our clip plane constants; we've
71        * already lowered them in NIR and don't want it doing it again.
72        */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 .ignore_sample_mask_out = !key->multisample_fbo,
129 };
130 }
131
132 static struct brw_cs_prog_key
133 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
134 const struct iris_cs_prog_key *key)
135 {
136 return (struct brw_cs_prog_key) {
137 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
138 };
139 }
140
141 static void *
142 upload_state(struct u_upload_mgr *uploader,
143 struct iris_state_ref *ref,
144 unsigned size,
145 unsigned alignment)
146 {
147 void *p = NULL;
148 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
149 return p;
150 }
151
152 void
153 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
154 struct pipe_shader_buffer *buf,
155 struct iris_state_ref *surf_state,
156 bool ssbo)
157 {
158 struct pipe_context *ctx = &ice->ctx;
159 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
160
161 void *map =
162 upload_state(ice->state.surface_uploader, surf_state,
163 screen->isl_dev.ss.size, 64);
164    if (unlikely(!map)) {
165 surf_state->res = NULL;
166 return;
167 }
168
169 struct iris_resource *res = (void *) buf->buffer;
170 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
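   /* Make the offset relative to the surface state base address, i.e. account
    * for where the surface state BO sits within its memory zone.
    */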
171 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
172
173 isl_buffer_fill_state(&screen->isl_dev, map,
174 .address = res->bo->gtt_offset + res->offset +
175 buf->buffer_offset,
176 .size_B = buf->buffer_size - res->offset,
177 .format = ssbo ? ISL_FORMAT_RAW
178 : ISL_FORMAT_R32G32B32A32_FLOAT,
179 .swizzle = ISL_SWIZZLE_IDENTITY,
180 .stride_B = 1,
181 .mocs = iris_mocs(res->bo, &screen->isl_dev));
182 }
183
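/**
 * Walk an array-of-arrays deref chain and compute a flattened element
 * offset (scaled by elem_size), clamped to stay within the array bounds.
 */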
184 static nir_ssa_def *
185 get_aoa_deref_offset(nir_builder *b,
186 nir_deref_instr *deref,
187 unsigned elem_size)
188 {
189 unsigned array_size = elem_size;
190 nir_ssa_def *offset = nir_imm_int(b, 0);
191
192 while (deref->deref_type != nir_deref_type_var) {
193 assert(deref->deref_type == nir_deref_type_array);
194
195 /* This level's element size is the previous level's array size */
196          assert(deref->arr.index.ssa);
197          nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
198 offset = nir_iadd(b, offset,
199 nir_imul(b, index, nir_imm_int(b, array_size)));
200
201 deref = nir_deref_instr_parent(deref);
202 assert(glsl_type_is_array(deref->type));
203 array_size *= glsl_get_length(deref->type);
204 }
205
206 /* Accessing an invalid surface index with the dataport can result in a
207 * hang. According to the spec "if the index used to select an individual
208 * element is negative or greater than or equal to the size of the array,
209 * the results of the operation are undefined but may not lead to
210 * termination" -- which is one of the possible outcomes of the hang.
211 * Clamp the index to prevent access outside of the array bounds.
212 */
213 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
214 }
215
216 static void
217 iris_lower_storage_image_derefs(nir_shader *nir)
218 {
219 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
220
221 nir_builder b;
222 nir_builder_init(&b, impl);
223
224 nir_foreach_block(block, impl) {
225 nir_foreach_instr_safe(instr, block) {
226 if (instr->type != nir_instr_type_intrinsic)
227 continue;
228
229 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_image_deref_load:
232 case nir_intrinsic_image_deref_store:
233 case nir_intrinsic_image_deref_atomic_add:
234 case nir_intrinsic_image_deref_atomic_imin:
235 case nir_intrinsic_image_deref_atomic_umin:
236 case nir_intrinsic_image_deref_atomic_imax:
237 case nir_intrinsic_image_deref_atomic_umax:
238 case nir_intrinsic_image_deref_atomic_and:
239 case nir_intrinsic_image_deref_atomic_or:
240 case nir_intrinsic_image_deref_atomic_xor:
241 case nir_intrinsic_image_deref_atomic_exchange:
242 case nir_intrinsic_image_deref_atomic_comp_swap:
243 case nir_intrinsic_image_deref_size:
244 case nir_intrinsic_image_deref_samples:
245 case nir_intrinsic_image_deref_load_raw_intel:
246 case nir_intrinsic_image_deref_store_raw_intel: {
247 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
248 nir_variable *var = nir_deref_instr_get_variable(deref);
249
250 b.cursor = nir_before_instr(&intrin->instr);
251 nir_ssa_def *index =
252 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
253 get_aoa_deref_offset(&b, deref, 1));
254 nir_rewrite_image_intrinsic(intrin, index, false);
255 break;
256 }
257
258 default:
259 break;
260 }
261 }
262 }
263 }
264
265 /**
266 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
267 */
268 static bool
269 iris_fix_edge_flags(nir_shader *nir)
270 {
271 if (nir->info.stage != MESA_SHADER_VERTEX) {
272 nir_shader_preserve_all_metadata(nir);
273 return false;
274 }
275
276 nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
277 VARYING_SLOT_EDGE);
278 if (!var) {
279 nir_shader_preserve_all_metadata(nir);
280 return false;
281 }
282
283 var->data.mode = nir_var_shader_temp;
284 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
285 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
286 nir_fixup_deref_modes(nir);
287
288 nir_foreach_function(f, nir) {
289 if (f->impl) {
290 nir_metadata_preserve(f->impl, nir_metadata_block_index |
291 nir_metadata_dominance |
292 nir_metadata_live_ssa_defs |
293 nir_metadata_loop_analysis);
294 } else {
295 nir_metadata_preserve(f->impl, nir_metadata_all);
296 }
297 }
298
299 return true;
300 }
301
302 /**
303 * Fix an uncompiled shader's stream output info.
304 *
305 * Core Gallium stores output->register_index as a "slot" number, where
306 * slots are assigned consecutively to all outputs in info->outputs_written.
307 * This naive packing of outputs doesn't work for us - we too have slots,
308 * but the layout is defined by the VUE map, which we won't have until we
309 * compile a specific shader variant. So, we remap these and simply store
310 * VARYING_SLOT_* in our copy's output->register_index fields.
311 *
312 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
313 * components of our VUE header. See brw_vue_map.c for the layout.
314 */
315 static void
316 update_so_info(struct pipe_stream_output_info *so_info,
317 uint64_t outputs_written)
318 {
319 uint8_t reverse_map[64] = {};
320 unsigned slot = 0;
321 while (outputs_written) {
322 reverse_map[slot++] = u_bit_scan64(&outputs_written);
323 }
324
325 for (unsigned i = 0; i < so_info->num_outputs; i++) {
326 struct pipe_stream_output *output = &so_info->output[i];
327
328 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
329 output->register_index = reverse_map[output->register_index];
330
331 /* The VUE header contains three scalar fields packed together:
332 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
333 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
334 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
335 */
336 switch (output->register_index) {
337 case VARYING_SLOT_LAYER:
338 assert(output->num_components == 1);
339 output->register_index = VARYING_SLOT_PSIZ;
340 output->start_component = 1;
341 break;
342 case VARYING_SLOT_VIEWPORT:
343 assert(output->num_components == 1);
344 output->register_index = VARYING_SLOT_PSIZ;
345 output->start_component = 2;
346 break;
347 case VARYING_SLOT_PSIZ:
348 assert(output->num_components == 1);
349 output->start_component = 3;
350 break;
351 }
352
353 //info->outputs_written |= 1ull << output->register_index;
354 }
355 }
356
357 static void
358 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
359 unsigned offset, unsigned n)
360 {
361 assert(offset % sizeof(uint32_t) == 0);
362
363 for (unsigned i = 0; i < n; ++i)
364 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
365
366 for (unsigned i = n; i < 4; ++i)
367 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
368 }
369
370 /**
371 * Associate NIR uniform variables with the prog_data->param[] mechanism
372 * used by the backend. Also, decide which UBOs we'd like to push in an
373 * ideal situation (though the backend can reduce this).
374 */
375 static void
376 iris_setup_uniforms(const struct brw_compiler *compiler,
377 void *mem_ctx,
378 nir_shader *nir,
379 struct brw_stage_prog_data *prog_data,
380 enum brw_param_builtin **out_system_values,
381 unsigned *out_num_system_values,
382 unsigned *out_num_cbufs)
383 {
384 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
385
386 const unsigned IRIS_MAX_SYSTEM_VALUES =
387 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
388 enum brw_param_builtin *system_values =
389 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
390 unsigned num_system_values = 0;
391
392 unsigned patch_vert_idx = -1;
393 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
394 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
395 unsigned variable_group_size_idx = -1;
396 memset(ucp_idx, -1, sizeof(ucp_idx));
397 memset(img_idx, -1, sizeof(img_idx));
398
399 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
400
401 nir_builder b;
402 nir_builder_init(&b, impl);
403
404 b.cursor = nir_before_block(nir_start_block(impl));
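   /* Placeholder UBO indices: UBO loads created below use these temporary
    * SSA values as their buffer index, and are rewritten at the end of this
    * function once the final constant buffer layout is known.
    */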
405 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
406 nir_ssa_def *temp_const_ubo_name = NULL;
407
408 /* Turn system value intrinsics into uniforms */
409 nir_foreach_block(block, impl) {
410 nir_foreach_instr_safe(instr, block) {
411 if (instr->type != nir_instr_type_intrinsic)
412 continue;
413
414 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
415 nir_ssa_def *offset;
416
417 switch (intrin->intrinsic) {
418 case nir_intrinsic_load_constant: {
419 /* This one is special because it reads from the shader constant
420 * data and not cbuf0 which gallium uploads for us.
421 */
422 b.cursor = nir_before_instr(instr);
423 nir_ssa_def *offset =
424 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
425 nir_intrinsic_base(intrin));
426
427 if (temp_const_ubo_name == NULL)
428 temp_const_ubo_name = nir_imm_int(&b, 0);
429
430 nir_intrinsic_instr *load_ubo =
431 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
432 load_ubo->num_components = intrin->num_components;
433 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
434 load_ubo->src[1] = nir_src_for_ssa(offset);
435 nir_intrinsic_set_align(load_ubo,
436 nir_intrinsic_align_mul(intrin),
437 nir_intrinsic_align_offset(intrin));
438 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
439 intrin->dest.ssa.num_components,
440 intrin->dest.ssa.bit_size,
441 intrin->dest.ssa.name);
442 nir_builder_instr_insert(&b, &load_ubo->instr);
443
444 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
445 nir_src_for_ssa(&load_ubo->dest.ssa));
446 nir_instr_remove(&intrin->instr);
447 continue;
448 }
449 case nir_intrinsic_load_user_clip_plane: {
450 unsigned ucp = nir_intrinsic_ucp_id(intrin);
451
452 if (ucp_idx[ucp] == -1) {
453 ucp_idx[ucp] = num_system_values;
454 num_system_values += 4;
455 }
456
457 for (int i = 0; i < 4; i++) {
458 system_values[ucp_idx[ucp] + i] =
459 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
460 }
461
462 b.cursor = nir_before_instr(instr);
463 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
464 break;
465 }
466 case nir_intrinsic_load_patch_vertices_in:
467 if (patch_vert_idx == -1)
468 patch_vert_idx = num_system_values++;
469
470 system_values[patch_vert_idx] =
471 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
472
473 b.cursor = nir_before_instr(instr);
474 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
475 break;
476 case nir_intrinsic_image_deref_load_param_intel: {
477 assert(devinfo->gen < 9);
478 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
479 nir_variable *var = nir_deref_instr_get_variable(deref);
480
481 if (img_idx[var->data.binding] == -1) {
482 /* GL only allows arrays of arrays of images. */
483 assert(glsl_type_is_image(glsl_without_array(var->type)));
484 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
485
486 for (int i = 0; i < num_images; i++) {
487 const unsigned img = var->data.binding + i;
488
489 img_idx[img] = num_system_values;
490 num_system_values += BRW_IMAGE_PARAM_SIZE;
491
492 uint32_t *img_sv = &system_values[img_idx[img]];
493
494 setup_vec4_image_sysval(
495 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
496 offsetof(struct brw_image_param, offset), 2);
497 setup_vec4_image_sysval(
498 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
499 offsetof(struct brw_image_param, size), 3);
500 setup_vec4_image_sysval(
501 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
502 offsetof(struct brw_image_param, stride), 4);
503 setup_vec4_image_sysval(
504 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
505 offsetof(struct brw_image_param, tiling), 3);
506 setup_vec4_image_sysval(
507 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
508 offsetof(struct brw_image_param, swizzling), 2);
509 }
510 }
511
512 b.cursor = nir_before_instr(instr);
513 offset = nir_iadd(&b,
514 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
515 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
516 nir_intrinsic_base(intrin) * 16));
517 break;
518 }
519 case nir_intrinsic_load_local_group_size: {
520 assert(nir->info.cs.local_size_variable);
521 if (variable_group_size_idx == -1) {
522 variable_group_size_idx = num_system_values;
523 num_system_values += 3;
524 for (int i = 0; i < 3; i++) {
525 system_values[variable_group_size_idx + i] =
526 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
527 }
528 }
529
530 b.cursor = nir_before_instr(instr);
531 offset = nir_imm_int(&b, variable_group_size_idx * sizeof(uint32_t));
532 break;
533 }
534 default:
535 continue;
536 }
537
538 unsigned comps = nir_intrinsic_dest_components(intrin);
539
540 nir_intrinsic_instr *load =
541 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
542 load->num_components = comps;
543 load->src[0] = nir_src_for_ssa(temp_ubo_name);
544 load->src[1] = nir_src_for_ssa(offset);
545 nir_intrinsic_set_align(load, 4, 0);
546 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
547 nir_builder_instr_insert(&b, &load->instr);
548 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
549 nir_src_for_ssa(&load->dest.ssa));
550 nir_instr_remove(instr);
551 }
552 }
553
554 nir_validate_shader(nir, "before remapping");
555
556    /* Uniforms are stored in constant buffer 0; the user-facing UBOs are
557     * shifted up by one.  So if any constant buffer is needed, constant
558     * buffer 0 will be needed too, so account for it.
559     */
560 unsigned num_cbufs = nir->info.num_ubos;
561 if (num_cbufs || nir->num_uniforms)
562 num_cbufs++;
563
564 /* Place the new params in a new cbuf. */
565 if (num_system_values > 0) {
566 unsigned sysval_cbuf_index = num_cbufs;
567 num_cbufs++;
568
569 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
570 num_system_values);
571
572 nir_foreach_block(block, impl) {
573 nir_foreach_instr_safe(instr, block) {
574 if (instr->type != nir_instr_type_intrinsic)
575 continue;
576
577 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
578
579 if (load->intrinsic != nir_intrinsic_load_ubo)
580 continue;
581
582 b.cursor = nir_before_instr(instr);
583
584 assert(load->src[0].is_ssa);
585
586 if (load->src[0].ssa == temp_ubo_name) {
587 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
588 nir_instr_rewrite_src(instr, &load->src[0],
589 nir_src_for_ssa(imm));
590 }
591 }
592 }
593
594 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
595 nir_opt_constant_folding(nir);
596 } else {
597 ralloc_free(system_values);
598 system_values = NULL;
599 }
600
601 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
602 nir_validate_shader(nir, "after remap");
603
604    /* We don't use params[], but gallium leaves num_uniforms set.  We used
605     * it earlier to detect whether cbuf0 exists, but we don't need it anymore
606     * by this point.  Zero it out so that the back-end doesn't get confused
607     * when nr_params * 4 != num_uniforms.
608     */
609 nir->num_uniforms = 0;
610
611 /* Constant loads (if any) need to go at the end of the constant buffers so
612 * we need to know num_cbufs before we can lower to them.
613 */
614 if (temp_const_ubo_name != NULL) {
615 nir_load_const_instr *const_ubo_index =
616 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
617 assert(const_ubo_index->def.bit_size == 32);
618 const_ubo_index->value[0].u32 = num_cbufs;
619 }
620
621 *out_system_values = system_values;
622 *out_num_system_values = num_system_values;
623 *out_num_cbufs = num_cbufs;
624 }
625
626 static const char *surface_group_names[] = {
627 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
628 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
629 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
630 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
631 [IRIS_SURFACE_GROUP_UBO] = "ubo",
632 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
633 [IRIS_SURFACE_GROUP_IMAGE] = "image",
634 };
635
636 static void
637 iris_print_binding_table(FILE *fp, const char *name,
638 const struct iris_binding_table *bt)
639 {
640 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
641
642 uint32_t total = 0;
643 uint32_t compacted = 0;
644
645 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
646 uint32_t size = bt->sizes[i];
647 total += size;
648 if (size)
649 compacted += util_bitcount64(bt->used_mask[i]);
650 }
651
652 if (total == 0) {
653 fprintf(fp, "Binding table for %s is empty\n\n", name);
654 return;
655 }
656
657 if (total != compacted) {
658 fprintf(fp, "Binding table for %s "
659 "(compacted to %u entries from %u entries)\n",
660 name, compacted, total);
661 } else {
662 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
663 }
664
665 uint32_t entry = 0;
666 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
667 uint64_t mask = bt->used_mask[i];
668 while (mask) {
669 int index = u_bit_scan64(&mask);
670 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
671 }
672 }
673 fprintf(fp, "\n");
674 }
675
676 enum {
677 /* Max elements in a surface group. */
678 SURFACE_GROUP_MAX_ELEMENTS = 64,
679 };
680
681 /**
682 * Map a <group, index> pair to a binding table index.
683 *
684 * For example: <UBO, 5> => binding table index 12
685 */
686 uint32_t
687 iris_group_index_to_bti(const struct iris_binding_table *bt,
688 enum iris_surface_group group, uint32_t index)
689 {
690 assert(index < bt->sizes[group]);
691 uint64_t mask = bt->used_mask[group];
692 uint64_t bit = 1ull << index;
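   /* The compacted binding table index is the group's base offset plus the
    * number of used surfaces below this index within the group.
    */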
693 if (bit & mask) {
694 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
695 } else {
696 return IRIS_SURFACE_NOT_USED;
697 }
698 }
699
700 /**
701 * Map a binding table index back to a <group, index> pair.
702 *
703 * For example: binding table index 12 => <UBO, 5>
704 */
705 uint32_t
706 iris_bti_to_group_index(const struct iris_binding_table *bt,
707 enum iris_surface_group group, uint32_t bti)
708 {
709 uint64_t used_mask = bt->used_mask[group];
710 assert(bti >= bt->offsets[group]);
711
712 uint32_t c = bti - bt->offsets[group];
713 while (used_mask) {
714 int i = u_bit_scan64(&used_mask);
715 if (c == 0)
716 return i;
717 c--;
718 }
719
720 return IRIS_SURFACE_NOT_USED;
721 }
722
723 static void
724 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
725 nir_instr *instr, nir_src *src,
726 enum iris_surface_group group)
727 {
728 assert(bt->sizes[group] > 0);
729
730 b->cursor = nir_before_instr(instr);
731 nir_ssa_def *bti;
732 if (nir_src_is_const(*src)) {
733 uint32_t index = nir_src_as_uint(*src);
734 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
735 src->ssa->bit_size);
736 } else {
737       /* Indirect usage makes all the surfaces of the group available,
738        * so we can just add the base.
739        */
740 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
741 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
742 }
743 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
744 }
745
746 static void
747 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
748 enum iris_surface_group group)
749 {
750 assert(bt->sizes[group] > 0);
751
752 if (nir_src_is_const(*src)) {
753 uint64_t index = nir_src_as_uint(*src);
754 assert(index < bt->sizes[group]);
755 bt->used_mask[group] |= 1ull << index;
756 } else {
757 /* There's an indirect usage, we need all the surfaces. */
758 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
759 }
760 }
761
762 static bool
763 skip_compacting_binding_tables(void)
764 {
765 static int skip = -1;
766 if (skip < 0)
767 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
768 return skip;
769 }
770
771 /**
772 * Set up the binding table indices and apply to the shader.
773 */
774 static void
775 iris_setup_binding_table(const struct gen_device_info *devinfo,
776 struct nir_shader *nir,
777 struct iris_binding_table *bt,
778 unsigned num_render_targets,
779 unsigned num_system_values,
780 unsigned num_cbufs)
781 {
782 const struct shader_info *info = &nir->info;
783
784 memset(bt, 0, sizeof(*bt));
785
786 /* Set the sizes for each surface group. For some groups, we already know
787 * upfront how many will be used, so mark them.
788 */
789 if (info->stage == MESA_SHADER_FRAGMENT) {
790 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
791 /* All render targets used. */
792 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
793 BITFIELD64_MASK(num_render_targets);
794
795       /* Set up the render target read surface group in order to support
796        * non-coherent framebuffer fetch on Gen8.
797        */
798 if (devinfo->gen == 8 && info->outputs_read) {
799 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
800 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
801 BITFIELD64_MASK(num_render_targets);
802 }
803 } else if (info->stage == MESA_SHADER_COMPUTE) {
804 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
805 }
806
807 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
808 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
809
810 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
811
812 /* Allocate an extra slot in the UBO section for NIR constants.
813 * Binding table compaction will remove it if unnecessary.
814 *
815 * We don't include them in iris_compiled_shader::num_cbufs because
816 * they are uploaded separately from shs->constbuf[], but from a shader
817 * point of view, they're another UBO (at the end of the section).
818 */
819 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
820
821 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
822
823 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
824 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
825
826    /* Mark surfaces used for the cases where we don't have the information
827     * available upfront.
828     */
829 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
830 nir_foreach_block (block, impl) {
831 nir_foreach_instr (instr, block) {
832 if (instr->type != nir_instr_type_intrinsic)
833 continue;
834
835 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
836 switch (intrin->intrinsic) {
837 case nir_intrinsic_load_num_work_groups:
838 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
839 break;
840
841 case nir_intrinsic_load_output:
842 if (devinfo->gen == 8) {
843 mark_used_with_src(bt, &intrin->src[0],
844 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
845 }
846 break;
847
848 case nir_intrinsic_image_size:
849 case nir_intrinsic_image_load:
850 case nir_intrinsic_image_store:
851 case nir_intrinsic_image_atomic_add:
852 case nir_intrinsic_image_atomic_imin:
853 case nir_intrinsic_image_atomic_umin:
854 case nir_intrinsic_image_atomic_imax:
855 case nir_intrinsic_image_atomic_umax:
856 case nir_intrinsic_image_atomic_and:
857 case nir_intrinsic_image_atomic_or:
858 case nir_intrinsic_image_atomic_xor:
859 case nir_intrinsic_image_atomic_exchange:
860 case nir_intrinsic_image_atomic_comp_swap:
861 case nir_intrinsic_image_load_raw_intel:
862 case nir_intrinsic_image_store_raw_intel:
863 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
864 break;
865
866 case nir_intrinsic_load_ubo:
867 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
868 break;
869
870 case nir_intrinsic_store_ssbo:
871 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
872 break;
873
874 case nir_intrinsic_get_buffer_size:
875 case nir_intrinsic_ssbo_atomic_add:
876 case nir_intrinsic_ssbo_atomic_imin:
877 case nir_intrinsic_ssbo_atomic_umin:
878 case nir_intrinsic_ssbo_atomic_imax:
879 case nir_intrinsic_ssbo_atomic_umax:
880 case nir_intrinsic_ssbo_atomic_and:
881 case nir_intrinsic_ssbo_atomic_or:
882 case nir_intrinsic_ssbo_atomic_xor:
883 case nir_intrinsic_ssbo_atomic_exchange:
884 case nir_intrinsic_ssbo_atomic_comp_swap:
885 case nir_intrinsic_ssbo_atomic_fmin:
886 case nir_intrinsic_ssbo_atomic_fmax:
887 case nir_intrinsic_ssbo_atomic_fcomp_swap:
888 case nir_intrinsic_load_ssbo:
889 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
890 break;
891
892 default:
893 break;
894 }
895 }
896 }
897
898    /* When disabled, we just mark everything as used. */
899 if (unlikely(skip_compacting_binding_tables())) {
900 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
901 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
902 }
903
904 /* Calculate the offsets and the binding table size based on the used
905 * surfaces. After this point, the functions to go between "group indices"
906 * and binding table indices can be used.
907 */
908 uint32_t next = 0;
909 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
910 if (bt->used_mask[i] != 0) {
911 bt->offsets[i] = next;
912 next += util_bitcount64(bt->used_mask[i]);
913 }
914 }
915 bt->size_bytes = next * 4;
916
917 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
918 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
919 }
920
921 /* Apply the binding table indices. The backend compiler is not expected
922 * to change those, as we haven't set any of the *_start entries in brw
923 * binding_table.
924 */
925 nir_builder b;
926 nir_builder_init(&b, impl);
927
928 nir_foreach_block (block, impl) {
929 nir_foreach_instr (instr, block) {
930 if (instr->type == nir_instr_type_tex) {
931 nir_tex_instr *tex = nir_instr_as_tex(instr);
932 tex->texture_index =
933 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
934 tex->texture_index);
935 continue;
936 }
937
938 if (instr->type != nir_instr_type_intrinsic)
939 continue;
940
941 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
942 switch (intrin->intrinsic) {
943 case nir_intrinsic_image_size:
944 case nir_intrinsic_image_load:
945 case nir_intrinsic_image_store:
946 case nir_intrinsic_image_atomic_add:
947 case nir_intrinsic_image_atomic_imin:
948 case nir_intrinsic_image_atomic_umin:
949 case nir_intrinsic_image_atomic_imax:
950 case nir_intrinsic_image_atomic_umax:
951 case nir_intrinsic_image_atomic_and:
952 case nir_intrinsic_image_atomic_or:
953 case nir_intrinsic_image_atomic_xor:
954 case nir_intrinsic_image_atomic_exchange:
955 case nir_intrinsic_image_atomic_comp_swap:
956 case nir_intrinsic_image_load_raw_intel:
957 case nir_intrinsic_image_store_raw_intel:
958 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
959 IRIS_SURFACE_GROUP_IMAGE);
960 break;
961
962 case nir_intrinsic_load_ubo:
963 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
964 IRIS_SURFACE_GROUP_UBO);
965 break;
966
967 case nir_intrinsic_store_ssbo:
968 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
969 IRIS_SURFACE_GROUP_SSBO);
970 break;
971
972 case nir_intrinsic_load_output:
973 if (devinfo->gen == 8) {
974 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
975 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
976 }
977 break;
978
979 case nir_intrinsic_get_buffer_size:
980 case nir_intrinsic_ssbo_atomic_add:
981 case nir_intrinsic_ssbo_atomic_imin:
982 case nir_intrinsic_ssbo_atomic_umin:
983 case nir_intrinsic_ssbo_atomic_imax:
984 case nir_intrinsic_ssbo_atomic_umax:
985 case nir_intrinsic_ssbo_atomic_and:
986 case nir_intrinsic_ssbo_atomic_or:
987 case nir_intrinsic_ssbo_atomic_xor:
988 case nir_intrinsic_ssbo_atomic_exchange:
989 case nir_intrinsic_ssbo_atomic_comp_swap:
990 case nir_intrinsic_ssbo_atomic_fmin:
991 case nir_intrinsic_ssbo_atomic_fmax:
992 case nir_intrinsic_ssbo_atomic_fcomp_swap:
993 case nir_intrinsic_load_ssbo:
994 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
995 IRIS_SURFACE_GROUP_SSBO);
996 break;
997
998 default:
999 break;
1000 }
1001 }
1002 }
1003 }
1004
1005 static void
1006 iris_debug_recompile(struct iris_context *ice,
1007 struct shader_info *info,
1008 const struct brw_base_prog_key *key)
1009 {
1010 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1011 const struct gen_device_info *devinfo = &screen->devinfo;
1012 const struct brw_compiler *c = screen->compiler;
1013
1014 if (!info)
1015 return;
1016
1017 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1018 _mesa_shader_stage_to_string(info->stage),
1019 info->name ? info->name : "(no identifier)",
1020 info->label ? info->label : "");
1021
1022 const void *old_iris_key =
1023 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1024
1025 union brw_any_prog_key old_key;
1026
1027 switch (info->stage) {
1028 case MESA_SHADER_VERTEX:
1029 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1030 break;
1031 case MESA_SHADER_TESS_CTRL:
1032 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1033 break;
1034 case MESA_SHADER_TESS_EVAL:
1035 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1036 break;
1037 case MESA_SHADER_GEOMETRY:
1038 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1039 break;
1040 case MESA_SHADER_FRAGMENT:
1041 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1042 break;
1043 case MESA_SHADER_COMPUTE:
1044 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1045 break;
1046 default:
1047 unreachable("invalid shader stage");
1048 }
1049
1050 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1051 }
1052
1053 /**
1054 * Get the shader for the last enabled geometry stage.
1055 *
1056 * This stage is the one which will feed stream output and the rasterizer.
1057 */
1058 static gl_shader_stage
1059 last_vue_stage(struct iris_context *ice)
1060 {
1061 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1062 return MESA_SHADER_GEOMETRY;
1063
1064 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1065 return MESA_SHADER_TESS_EVAL;
1066
1067 return MESA_SHADER_VERTEX;
1068 }
1069
1070 /**
1071 * Compile a vertex shader, and upload the assembly.
1072 */
1073 static struct iris_compiled_shader *
1074 iris_compile_vs(struct iris_context *ice,
1075 struct iris_uncompiled_shader *ish,
1076 const struct iris_vs_prog_key *key)
1077 {
1078 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1079 const struct brw_compiler *compiler = screen->compiler;
1080 const struct gen_device_info *devinfo = &screen->devinfo;
1081 void *mem_ctx = ralloc_context(NULL);
1082 struct brw_vs_prog_data *vs_prog_data =
1083 rzalloc(mem_ctx, struct brw_vs_prog_data);
1084 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1085 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1086 enum brw_param_builtin *system_values;
1087 unsigned num_system_values;
1088 unsigned num_cbufs;
1089
1090 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1091
1092 if (key->vue.nr_userclip_plane_consts) {
1093 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1094 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1095 true, false, NULL);
1096 nir_lower_io_to_temporaries(nir, impl, true, false);
1097 nir_lower_global_vars_to_local(nir);
1098 nir_lower_vars_to_ssa(nir);
1099 nir_shader_gather_info(nir, impl);
1100 }
1101
1102 prog_data->use_alt_mode = ish->use_alt_mode;
1103
1104 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1105 &num_system_values, &num_cbufs);
1106
1107 struct iris_binding_table bt;
1108 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1109 num_system_values, num_cbufs);
1110
1111 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1112
1113 brw_compute_vue_map(devinfo,
1114 &vue_prog_data->vue_map, nir->info.outputs_written,
1115 nir->info.separate_shader, /* pos_slots */ 1);
1116
1117 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1118
1119 char *error_str = NULL;
1120 const unsigned *program =
1121 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1122 nir, -1, NULL, &error_str);
1123 if (program == NULL) {
1124 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1125 ralloc_free(mem_ctx);
1126       return NULL;
1127 }
1128
1129 if (ish->compiled_once) {
1130 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1131 } else {
1132 ish->compiled_once = true;
1133 }
1134
1135 uint32_t *so_decls =
1136 screen->vtbl.create_so_decl_list(&ish->stream_output,
1137 &vue_prog_data->vue_map);
1138
1139 struct iris_compiled_shader *shader =
1140 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1141 prog_data, so_decls, system_values, num_system_values,
1142 0, num_cbufs, &bt);
1143
1144 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1145
1146 ralloc_free(mem_ctx);
1147 return shader;
1148 }
1149
1150 /**
1151 * Update the current vertex shader variant.
1152 *
1153 * Fill out the key, look in the cache, compile and bind if needed.
1154 */
1155 static void
1156 iris_update_compiled_vs(struct iris_context *ice)
1157 {
1158 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1159 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1160 struct iris_uncompiled_shader *ish =
1161 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1162
1163 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1164 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1165
1166 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1167 struct iris_compiled_shader *shader =
1168 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1169
1170 if (!shader)
1171 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1172
1173 if (!shader)
1174 shader = iris_compile_vs(ice, ish, &key);
1175
1176 if (old != shader) {
1177 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1178 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1179 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1180 IRIS_STAGE_DIRTY_BINDINGS_VS |
1181 IRIS_STAGE_DIRTY_CONSTANTS_VS;
1182 shs->sysvals_need_upload = true;
1183
1184 const struct brw_vs_prog_data *vs_prog_data =
1185 (void *) shader->prog_data;
1186 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1187 vs_prog_data->uses_baseinstance;
1188 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1189 vs_prog_data->uses_is_indexed_draw;
1190 const bool needs_sgvs_element = uses_draw_params ||
1191 vs_prog_data->uses_instanceid ||
1192 vs_prog_data->uses_vertexid;
1193
1194 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1195 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1196 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1197 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1198 IRIS_DIRTY_VERTEX_ELEMENTS;
1199 }
1200 ice->state.vs_uses_draw_params = uses_draw_params;
1201 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1202 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1203 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1204 }
1205 }
1206
1207 /**
1208 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1209 */
1210 const struct shader_info *
1211 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1212 {
1213 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1214
1215 if (!ish)
1216 return NULL;
1217
1218 const nir_shader *nir = ish->nir;
1219 return &nir->info;
1220 }
1221
1222 /**
1223 * Get the union of TCS output and TES input slots.
1224 *
1225 * TCS and TES need to agree on a common URB entry layout. In particular,
1226 * the data for all patch vertices is stored in a single URB entry (unlike
1227 * GS which has one entry per input vertex). This means that per-vertex
1228 * array indexing needs a stride.
1229 *
1230 * SSO requires locations to match, but doesn't require the number of
1231 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1232 * So, we need to take the extra step of unifying these on the fly.
1233 */
1234 static void
1235 get_unified_tess_slots(const struct iris_context *ice,
1236 uint64_t *per_vertex_slots,
1237 uint32_t *per_patch_slots)
1238 {
1239 const struct shader_info *tcs =
1240 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1241 const struct shader_info *tes =
1242 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1243
1244 *per_vertex_slots = tes->inputs_read;
1245 *per_patch_slots = tes->patch_inputs_read;
1246
1247 if (tcs) {
1248 *per_vertex_slots |= tcs->outputs_written;
1249 *per_patch_slots |= tcs->patch_outputs_written;
1250 }
1251 }
1252
1253 /**
1254 * Compile a tessellation control shader, and upload the assembly.
1255 */
1256 static struct iris_compiled_shader *
1257 iris_compile_tcs(struct iris_context *ice,
1258 struct iris_uncompiled_shader *ish,
1259 const struct iris_tcs_prog_key *key)
1260 {
1261 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1262 const struct brw_compiler *compiler = screen->compiler;
1263 const struct nir_shader_compiler_options *options =
1264 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1265 void *mem_ctx = ralloc_context(NULL);
1266 struct brw_tcs_prog_data *tcs_prog_data =
1267 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1268 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1269 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1270 const struct gen_device_info *devinfo = &screen->devinfo;
1271 enum brw_param_builtin *system_values = NULL;
1272 unsigned num_system_values = 0;
1273 unsigned num_cbufs = 0;
1274
1275 nir_shader *nir;
1276
1277 struct iris_binding_table bt;
1278
1279 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1280
1281 if (ish) {
1282 nir = nir_shader_clone(mem_ctx, ish->nir);
1283
1284 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1285 &num_system_values, &num_cbufs);
1286 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1287 num_system_values, num_cbufs);
1288 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1289 } else {
1290 nir =
1291 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1292
1293 /* Reserve space for passing the default tess levels as constants. */
1294 num_cbufs = 1;
1295 num_system_values = 8;
1296 system_values =
1297 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1298 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1299 prog_data->nr_params = num_system_values;
1300
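      /* Lay out the default tess levels in the 8 reserved sysval slots:
       * the outer levels fill slots 7 downward (.x first), the inner levels
       * sit below them, and any remaining slots stay zero-initialized.
       */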
1301 if (key->tes_primitive_mode == GL_QUADS) {
1302 for (int i = 0; i < 4; i++)
1303 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1304
1305 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1306 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1307 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1308 for (int i = 0; i < 3; i++)
1309 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1310
1311 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1312 } else {
1313 assert(key->tes_primitive_mode == GL_ISOLINES);
1314 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1315 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1316 }
1317
1318 /* Manually setup the TCS binding table. */
1319 memset(&bt, 0, sizeof(bt));
1320 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1321 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1322 bt.size_bytes = 4;
1323
1324 prog_data->ubo_ranges[0].length = 1;
1325 }
1326
1327 char *error_str = NULL;
1328 const unsigned *program =
1329 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1330 nir, -1, NULL, &error_str);
1331 if (program == NULL) {
1332 dbg_printf("Failed to compile control shader: %s\n", error_str);
1333 ralloc_free(mem_ctx);
1334       return NULL;
1335 }
1336
1337 if (ish) {
1338 if (ish->compiled_once) {
1339 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1340 } else {
1341 ish->compiled_once = true;
1342 }
1343 }
1344
1345 struct iris_compiled_shader *shader =
1346 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1347 prog_data, NULL, system_values, num_system_values,
1348 0, num_cbufs, &bt);
1349
1350 if (ish)
1351 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1352
1353 ralloc_free(mem_ctx);
1354 return shader;
1355 }
1356
1357 /**
1358 * Update the current tessellation control shader variant.
1359 *
1360 * Fill out the key, look in the cache, compile and bind if needed.
1361 */
1362 static void
1363 iris_update_compiled_tcs(struct iris_context *ice)
1364 {
1365 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1366 struct iris_uncompiled_shader *tcs =
1367 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1368 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1369 const struct brw_compiler *compiler = screen->compiler;
1370 const struct gen_device_info *devinfo = &screen->devinfo;
1371
1372 const struct shader_info *tes_info =
1373 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1374 struct iris_tcs_prog_key key = {
1375 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1376 .tes_primitive_mode = tes_info->tess.primitive_mode,
1377 .input_vertices =
1378 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1379 .quads_workaround = devinfo->gen < 9 &&
1380 tes_info->tess.primitive_mode == GL_QUADS &&
1381 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1382 };
1383 get_unified_tess_slots(ice, &key.outputs_written,
1384 &key.patch_outputs_written);
1385 screen->vtbl.populate_tcs_key(ice, &key);
1386
1387 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1388 struct iris_compiled_shader *shader =
1389 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1390
1391 if (tcs && !shader)
1392 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1393
1394 if (!shader)
1395 shader = iris_compile_tcs(ice, tcs, &key);
1396
1397 if (old != shader) {
1398 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1399 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1400 IRIS_STAGE_DIRTY_BINDINGS_TCS |
1401 IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1402 shs->sysvals_need_upload = true;
1403 }
1404 }
1405
1406 /**
1407 * Compile a tessellation evaluation shader, and upload the assembly.
1408 */
1409 static struct iris_compiled_shader *
1410 iris_compile_tes(struct iris_context *ice,
1411 struct iris_uncompiled_shader *ish,
1412 const struct iris_tes_prog_key *key)
1413 {
1414 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1415 const struct brw_compiler *compiler = screen->compiler;
1416 void *mem_ctx = ralloc_context(NULL);
1417 struct brw_tes_prog_data *tes_prog_data =
1418 rzalloc(mem_ctx, struct brw_tes_prog_data);
1419 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1420 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1421 enum brw_param_builtin *system_values;
1422 const struct gen_device_info *devinfo = &screen->devinfo;
1423 unsigned num_system_values;
1424 unsigned num_cbufs;
1425
1426 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1427
1428 if (key->vue.nr_userclip_plane_consts) {
1429 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1430 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1431 true, false, NULL);
1432 nir_lower_io_to_temporaries(nir, impl, true, false);
1433 nir_lower_global_vars_to_local(nir);
1434 nir_lower_vars_to_ssa(nir);
1435 nir_shader_gather_info(nir, impl);
1436 }
1437
1438 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1439 &num_system_values, &num_cbufs);
1440
1441 struct iris_binding_table bt;
1442 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1443 num_system_values, num_cbufs);
1444
1445 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1446
1447 struct brw_vue_map input_vue_map;
1448 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1449 key->patch_inputs_read);
1450
1451 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1452
1453 char *error_str = NULL;
1454 const unsigned *program =
1455 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1456 tes_prog_data, nir, -1, NULL, &error_str);
1457 if (program == NULL) {
1458 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1459 ralloc_free(mem_ctx);
1460       return NULL;
1461 }
1462
1463 if (ish->compiled_once) {
1464 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1465 } else {
1466 ish->compiled_once = true;
1467 }
1468
1469 uint32_t *so_decls =
1470 screen->vtbl.create_so_decl_list(&ish->stream_output,
1471 &vue_prog_data->vue_map);
1472
1473
1474 struct iris_compiled_shader *shader =
1475 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1476 prog_data, so_decls, system_values, num_system_values,
1477 0, num_cbufs, &bt);
1478
1479 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1480
1481 ralloc_free(mem_ctx);
1482 return shader;
1483 }
1484
1485 /**
1486 * Update the current tessellation evaluation shader variant.
1487 *
1488 * Fill out the key, look in the cache, compile and bind if needed.
1489 */
1490 static void
1491 iris_update_compiled_tes(struct iris_context *ice)
1492 {
1493 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1494 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1495 struct iris_uncompiled_shader *ish =
1496 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1497
1498 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1499 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1500 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1501
1502 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1503 struct iris_compiled_shader *shader =
1504 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1505
1506 if (!shader)
1507 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1508
1509 if (!shader)
1510 shader = iris_compile_tes(ice, ish, &key);
1511
1512 if (old != shader) {
1513 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1514 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1515 IRIS_STAGE_DIRTY_BINDINGS_TES |
1516 IRIS_STAGE_DIRTY_CONSTANTS_TES;
1517 shs->sysvals_need_upload = true;
1518 }
1519
1520 /* TODO: Could compare and avoid flagging this. */
1521 const struct shader_info *tes_info = &ish->nir->info;
1522 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1523 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1524 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1525 }
1526 }
1527
1528 /**
1529 * Compile a geometry shader, and upload the assembly.
1530 */
1531 static struct iris_compiled_shader *
1532 iris_compile_gs(struct iris_context *ice,
1533 struct iris_uncompiled_shader *ish,
1534 const struct iris_gs_prog_key *key)
1535 {
1536 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1537 const struct brw_compiler *compiler = screen->compiler;
1538 const struct gen_device_info *devinfo = &screen->devinfo;
1539 void *mem_ctx = ralloc_context(NULL);
1540 struct brw_gs_prog_data *gs_prog_data =
1541 rzalloc(mem_ctx, struct brw_gs_prog_data);
1542 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1543 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1544 enum brw_param_builtin *system_values;
1545 unsigned num_system_values;
1546 unsigned num_cbufs;
1547
1548 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1549
1550 if (key->vue.nr_userclip_plane_consts) {
1551 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1552 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1553 false, NULL);
1554 nir_lower_io_to_temporaries(nir, impl, true, false);
1555 nir_lower_global_vars_to_local(nir);
1556 nir_lower_vars_to_ssa(nir);
1557 nir_shader_gather_info(nir, impl);
1558 }
1559
1560 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1561 &num_system_values, &num_cbufs);
1562
1563 struct iris_binding_table bt;
1564 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1565 num_system_values, num_cbufs);
1566
1567 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1568
1569 brw_compute_vue_map(devinfo,
1570 &vue_prog_data->vue_map, nir->info.outputs_written,
1571 nir->info.separate_shader, /* pos_slots */ 1);
1572
1573 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1574
1575 char *error_str = NULL;
1576 const unsigned *program =
1577 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1578 nir, NULL, -1, NULL, &error_str);
1579 if (program == NULL) {
1580 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1581 ralloc_free(mem_ctx);
1582       return NULL;
1583 }
1584
1585 if (ish->compiled_once) {
1586 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1587 } else {
1588 ish->compiled_once = true;
1589 }
1590
1591 uint32_t *so_decls =
1592 screen->vtbl.create_so_decl_list(&ish->stream_output,
1593 &vue_prog_data->vue_map);
1594
1595 struct iris_compiled_shader *shader =
1596 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1597 prog_data, so_decls, system_values, num_system_values,
1598 0, num_cbufs, &bt);
1599
1600 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1601
1602 ralloc_free(mem_ctx);
1603 return shader;
1604 }
1605
1606 /**
1607 * Update the current geometry shader variant.
1608 *
1609 * Fill out the key, look in the cache, compile and bind if needed.
1610 */
1611 static void
1612 iris_update_compiled_gs(struct iris_context *ice)
1613 {
1614 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1615 struct iris_uncompiled_shader *ish =
1616 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1617 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1618 struct iris_compiled_shader *shader = NULL;
1619 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1620
1621 if (ish) {
1622 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1623 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1624
1625 shader =
1626 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1627
1628 if (!shader)
1629 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1630
1631 if (!shader)
1632 shader = iris_compile_gs(ice, ish, &key);
1633 }
1634
1635 if (old != shader) {
1636 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1637 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1638 IRIS_STAGE_DIRTY_BINDINGS_GS |
1639 IRIS_STAGE_DIRTY_CONSTANTS_GS;
1640 shs->sysvals_need_upload = true;
1641 }
1642 }
1643
1644 /**
1645 * Compile a fragment (pixel) shader, and upload the assembly.
1646 */
1647 static struct iris_compiled_shader *
1648 iris_compile_fs(struct iris_context *ice,
1649 struct iris_uncompiled_shader *ish,
1650 const struct iris_fs_prog_key *key,
1651 struct brw_vue_map *vue_map)
1652 {
1653 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1654 const struct brw_compiler *compiler = screen->compiler;
1655 void *mem_ctx = ralloc_context(NULL);
1656 struct brw_wm_prog_data *fs_prog_data =
1657 rzalloc(mem_ctx, struct brw_wm_prog_data);
1658 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1659 enum brw_param_builtin *system_values;
1660 const struct gen_device_info *devinfo = &screen->devinfo;
1661 unsigned num_system_values;
1662 unsigned num_cbufs;
1663
1664 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1665
1666 prog_data->use_alt_mode = ish->use_alt_mode;
1667
1668 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1669 &num_system_values, &num_cbufs);
1670
1671 /* Lower output variables to load_output intrinsics before setting up
1672 * binding tables, so iris_setup_binding_table can map any load_output
1673 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1674 * non-coherent framebuffer fetches.
1675 */
1676 brw_nir_lower_fs_outputs(nir);
1677
1678 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1679 * and do not need a binding table entry with a null surface. Earlier
1680 * generations need an entry for a null surface.
1681 */
1682 int null_rts = devinfo->gen < 11 ? 1 : 0;
1683
1684 struct iris_binding_table bt;
1685 iris_setup_binding_table(devinfo, nir, &bt,
1686 MAX2(key->nr_color_regions, null_rts),
1687 num_system_values, num_cbufs);
1688
1689 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1690
1691 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1692
1693 char *error_str = NULL;
1694 const unsigned *program =
1695 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1696 nir, -1, -1, -1, true, false, vue_map,
1697 NULL, &error_str);
1698 if (program == NULL) {
1699 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1700 ralloc_free(mem_ctx);
1701 return NULL;
1702 }
1703
1704 if (ish->compiled_once) {
1705 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1706 } else {
1707 ish->compiled_once = true;
1708 }
1709
1710 struct iris_compiled_shader *shader =
1711 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1712 prog_data, NULL, system_values, num_system_values,
1713 0, num_cbufs, &bt);
1714
1715 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1716
1717 ralloc_free(mem_ctx);
1718 return shader;
1719 }
1720
1721 /**
1722 * Update the current fragment shader variant.
1723 *
1724 * Fill out the key, look in the cache, compile and bind if needed.
1725 */
1726 static void
1727 iris_update_compiled_fs(struct iris_context *ice)
1728 {
1729 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1730 struct iris_uncompiled_shader *ish =
1731 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1732 struct iris_fs_prog_key key = { KEY_ID(base) };
1733 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1734 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1735
1736 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1737 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1738
1739 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1740 struct iris_compiled_shader *shader =
1741 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1742
1743 if (!shader)
1744 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1745
1746 if (!shader)
1747 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1748
1749 if (old != shader) {
1750 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1751 // toggles. might be able to avoid flagging SBE too.
1752 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1753 ice->state.dirty |= IRIS_DIRTY_WM |
1754 IRIS_DIRTY_CLIP |
1755 IRIS_DIRTY_SBE;
1756 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1757 IRIS_STAGE_DIRTY_BINDINGS_FS |
1758 IRIS_STAGE_DIRTY_CONSTANTS_FS;
1759 shs->sysvals_need_upload = true;
1760 }
1761 }
1762
1763 /**
1764 * Update the last enabled stage's VUE map.
1765 *
1766 * When the shader feeding the rasterizer's output interface changes, we
1767 * need to re-emit various packets.
1768 */
1769 static void
1770 update_last_vue_map(struct iris_context *ice,
1771 struct brw_stage_prog_data *prog_data)
1772 {
1773 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1774 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1775 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1776 const uint64_t changed_slots =
1777 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1778
1779 if (changed_slots & VARYING_BIT_VIEWPORT) {
1780 ice->state.num_viewports =
1781 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1782 ice->state.dirty |= IRIS_DIRTY_CLIP |
1783 IRIS_DIRTY_SF_CL_VIEWPORT |
1784 IRIS_DIRTY_CC_VIEWPORT |
1785 IRIS_DIRTY_SCISSOR_RECT;
1786 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1787 ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1788 }
1789
1790 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1791 ice->state.dirty |= IRIS_DIRTY_SBE;
1792 }
1793
1794 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1795 }
1796
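/**
 * Upload any missing surface states for bound constant buffers, and flag
 * the stage's bindings dirty if new descriptors were created (or system
 * values still need to be re-uploaded).
 */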
1797 static void
1798 iris_update_pull_constant_descriptors(struct iris_context *ice,
1799 gl_shader_stage stage)
1800 {
1801 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1802
1803 if (!shader || !shader->prog_data->has_ubo_pull)
1804 return;
1805
1806 struct iris_shader_state *shs = &ice->state.shaders[stage];
1807 bool any_new_descriptors =
1808 shader->num_system_values > 0 && shs->sysvals_need_upload;
1809
1810 unsigned bound_cbufs = shs->bound_cbufs;
1811
1812 while (bound_cbufs) {
1813 const int i = u_bit_scan(&bound_cbufs);
1814 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1815 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1816 if (!surf_state->res && cbuf->buffer) {
1817 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1818 any_new_descriptors = true;
1819 }
1820 }
1821
1822 if (any_new_descriptors)
1823 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1824 }
1825
1826 /**
1827 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1828 */
1829 static struct brw_vue_prog_data *
1830 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1831 {
1832 if (!ice->shaders.prog[stage])
1833 return NULL;
1834
1835 return (void *) ice->shaders.prog[stage]->prog_data;
1836 }
1837
1838 /**
1839 * Update the current shader variants for the given state.
1840 *
1841 * This should be called on every draw call to ensure that the correct
1842 * shaders are bound. It will also flag any dirty state triggered by
1843 * swapping out those shaders.
1844 */
1845 void
1846 iris_update_compiled_shaders(struct iris_context *ice)
1847 {
1848 const uint64_t dirty = ice->state.dirty;
1849 const uint64_t stage_dirty = ice->state.stage_dirty;
1850
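/* Snapshot the current VUE stage prog_data so we can detect URB-relevant
 * changes (such as entry size) after updating the shader variants; skipped
 * when the URB is already flagged dirty.
 */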
1851 struct brw_vue_prog_data *old_prog_datas[4];
1852 if (!(dirty & IRIS_DIRTY_URB)) {
1853 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1854 old_prog_datas[i] = get_vue_prog_data(ice, i);
1855 }
1856
1857 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1858 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1859 struct iris_uncompiled_shader *tes =
1860 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1861 if (tes) {
1862 iris_update_compiled_tcs(ice);
1863 iris_update_compiled_tes(ice);
1864 } else {
1865 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1866 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1867 ice->state.stage_dirty |=
1868 IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1869 IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1870 IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1871 }
1872 }
1873
1874 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1875 iris_update_compiled_vs(ice);
1876 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1877 iris_update_compiled_gs(ice);
1878
1879 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1880 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1881 const struct iris_compiled_shader *gs =
1882 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1883 const struct iris_compiled_shader *tes =
1884 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1885
1886 bool points_or_lines = false;
1887
1888 if (gs) {
1889 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1890 points_or_lines =
1891 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1892 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1893 } else if (tes) {
1894 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1895 points_or_lines =
1896 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1897 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1898 }
1899
1900 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1901 /* Outbound to XY Clip enables */
1902 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1903 ice->state.dirty |= IRIS_DIRTY_CLIP;
1904 }
1905 }
1906
1907 gl_shader_stage last_stage = last_vue_stage(ice);
1908 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1909 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1910 update_last_vue_map(ice, shader->prog_data);
1911 if (ice->state.streamout != shader->streamout) {
1912 ice->state.streamout = shader->streamout;
1913 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1914 }
1915
1916 if (ice->state.streamout_active) {
1917 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1918 struct iris_stream_output_target *so =
1919 (void *) ice->state.so_target[i];
1920 if (so)
1921 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1922 }
1923 }
1924
1925 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
1926 iris_update_compiled_fs(ice);
1927
1928 /* Changing shader interfaces may require a URB reconfiguration. */
1929 if (!(dirty & IRIS_DIRTY_URB)) {
1930 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1931 struct brw_vue_prog_data *old = old_prog_datas[i];
1932 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1933 if (!!old != !!new ||
1934 (new && new->urb_entry_size != old->urb_entry_size)) {
1935 ice->state.dirty |= IRIS_DIRTY_URB;
1936 break;
1937 }
1938 }
1939 }
1940
1941 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1942 if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
1943 iris_update_pull_constant_descriptors(ice, i);
1944 }
1945 }
1946
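/**
 * Compile a compute shader, and upload the assembly.
 */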
1947 static struct iris_compiled_shader *
1948 iris_compile_cs(struct iris_context *ice,
1949 struct iris_uncompiled_shader *ish,
1950 const struct iris_cs_prog_key *key)
1951 {
1952 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1953 const struct brw_compiler *compiler = screen->compiler;
1954 void *mem_ctx = ralloc_context(NULL);
1955 struct brw_cs_prog_data *cs_prog_data =
1956 rzalloc(mem_ctx, struct brw_cs_prog_data);
1957 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1958 enum brw_param_builtin *system_values;
1959 const struct gen_device_info *devinfo = &screen->devinfo;
1960 unsigned num_system_values;
1961 unsigned num_cbufs;
1962
1963 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1964
1965 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1966
1967 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1968 &num_system_values, &num_cbufs);
1969
1970 struct iris_binding_table bt;
1971 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1972 num_system_values, num_cbufs);
1973
1974 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1975
1976 char *error_str = NULL;
1977 const unsigned *program =
1978 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1979 nir, -1, NULL, &error_str);
1980 if (program == NULL) {
1981 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1982 ralloc_free(mem_ctx);
1983 return NULL;
1984 }
1985
1986 if (ish->compiled_once) {
1987 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1988 } else {
1989 ish->compiled_once = true;
1990 }
1991
1992 struct iris_compiled_shader *shader =
1993 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1994 prog_data, NULL, system_values, num_system_values,
1995 0, num_cbufs, &bt);
1996
1997 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1998
1999 ralloc_free(mem_ctx);
2000 return shader;
2001 }
2002
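/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */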
2003 static void
2004 iris_update_compiled_cs(struct iris_context *ice)
2005 {
2006 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2007 struct iris_uncompiled_shader *ish =
2008 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2009
2010 struct iris_cs_prog_key key = { KEY_ID(base) };
2011 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2012 screen->vtbl.populate_cs_key(ice, &key);
2013
2014 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2015 struct iris_compiled_shader *shader =
2016 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2017
2018 if (!shader)
2019 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2020
2021 if (!shader)
2022 shader = iris_compile_cs(ice, ish, &key);
2023
2024 if (old != shader) {
2025 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2026 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2027 IRIS_STAGE_DIRTY_BINDINGS_CS |
2028 IRIS_STAGE_DIRTY_CONSTANTS_CS;
2029 shs->sysvals_need_upload = true;
2030 }
2031 }
2032
2033 void
2034 iris_update_compiled_compute_shader(struct iris_context *ice)
2035 {
2036 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2037 iris_update_compiled_cs(ice);
2038
2039 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2040 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2041 }
2042
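/**
 * Fill out the push constant buffer for a compute shader dispatch.
 *
 * The only push constant is the subgroup ID, written at the start of each
 * thread's 8-dword slot.
 */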
2043 void
2044 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2045 unsigned threads,
2046 uint32_t *dst)
2047 {
2048 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2049 assert(cs_prog_data->push.cross_thread.size == 0);
2050 assert(cs_prog_data->push.per_thread.dwords == 1);
2051 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2052 for (unsigned t = 0; t < threads; t++)
2053 dst[8 * t] = t;
2054 }
2055
2056 /**
2057 * Allocate scratch BOs as needed for the given per-thread size and stage.
2058 */
2059 struct iris_bo *
2060 iris_get_scratch_space(struct iris_context *ice,
2061 unsigned per_thread_scratch,
2062 gl_shader_stage stage)
2063 {
2064 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2065 struct iris_bufmgr *bufmgr = screen->bufmgr;
2066 const struct gen_device_info *devinfo = &screen->devinfo;
2067
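/* Per-thread scratch sizes are powers of two starting at 1KB, so this maps
 * 1KB -> 0, 2KB -> 1, and so on, for indexing the scratch_bos table.
 */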
2068 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2069 assert(encoded_size < (1 << 16));
2070
2071 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2072
2073 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2074 *
2075 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2076 * must allocate scratch space enough so that each slice has 4
2077 * slices allowed."
2078 *
2079 * According to the other driver team, this applies to compute shaders
2080 * as well. This is not currently documented at all.
2081 *
2082 * This hack is no longer necessary on Gen11+.
2083 *
2084 * For Gen11+, scratch space allocation is based on the number of threads
2085 * in the base configuration.
2086 */
2087 unsigned subslice_total = screen->subslice_total;
2088 if (devinfo->gen >= 12)
2089 subslice_total = devinfo->num_subslices[0];
2090 else if (devinfo->gen == 11)
2091 subslice_total = 8;
2092 else if (devinfo->gen < 11)
2093 subslice_total = 4 * devinfo->num_slices;
2094 assert(subslice_total >= screen->subslice_total);
2095
2096 if (!*bop) {
2097 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2098
2099 if (devinfo->gen >= 12) {
2100 /* Same as ICL below, but with 16 EUs. */
2101 scratch_ids_per_subslice = 16 * 8;
2102 } else if (devinfo->gen == 11) {
2103 /* The MEDIA_VFE_STATE docs say:
2104 *
2105 * "Starting with this configuration, the Maximum Number of
2106 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2107 *
2108 * Although there are only 7 threads per EU in the configuration,
2109 * the FFTID is calculated as if there are 8 threads per EU,
2110 * which in turn requires a larger amount of Scratch Space to be
2111 * allocated by the driver."
2112 */
2113 scratch_ids_per_subslice = 8 * 8;
2114 }
2115
2116 uint32_t max_threads[] = {
2117 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2118 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2119 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2120 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2121 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2122 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2123 };
2124
2125 uint32_t size = per_thread_scratch * max_threads[stage];
2126
2127 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2128 }
2129
2130 return *bop;
2131 }
2132
2133 /* ------------------------------------------------------------------- */
2134
2135 /**
2136 * The pipe->create_[stage]_state() driver hooks.
2137 *
2138 * Performs basic NIR preprocessing, records any state dependencies, and
2139 * returns an iris_uncompiled_shader as the Gallium CSO.
2140 *
2141 * Actual shader compilation to assembly happens later, at first use.
2142 */
2143 static void *
2144 iris_create_uncompiled_shader(struct pipe_context *ctx,
2145 nir_shader *nir,
2146 const struct pipe_stream_output_info *so_info)
2147 {
2148 struct iris_context *ice = (void *)ctx;
2149 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2150 const struct gen_device_info *devinfo = &screen->devinfo;
2151
2152 struct iris_uncompiled_shader *ish =
2153 calloc(1, sizeof(struct iris_uncompiled_shader));
2154 if (!ish)
2155 return NULL;
2156
2157 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2158
2159 brw_preprocess_nir(screen->compiler, nir, NULL);
2160
2161 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2162 &ish->uses_atomic_load_store);
2163 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2164
2165 nir_sweep(nir);
2166
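/* If the shader has embedded constant data, upload it to a buffer and
 * create a surface state so it can be bound like a UBO.
 */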
2167 if (nir->constant_data_size > 0) {
2168 unsigned data_offset;
2169 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2170 32, nir->constant_data, &data_offset, &ish->const_data);
2171
2172 struct pipe_shader_buffer psb = {
2173 .buffer = ish->const_data,
2174 .buffer_offset = data_offset,
2175 .buffer_size = nir->constant_data_size,
2176 };
2177 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2178 }
2179
2180 ish->program_id = get_new_program_id(screen);
2181 ish->nir = nir;
2182 if (so_info) {
2183 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2184 update_so_info(&ish->stream_output, nir->info.outputs_written);
2185 }
2186
2187 /* Save this now before potentially dropping nir->info.name */
2188 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2189 ish->use_alt_mode = true;
2190
2191 if (screen->disk_cache) {
2192 /* Serialize the NIR to a binary blob that we can hash for the disk
2193 * cache. Drop unnecessary information (like variable names)
2194 * so the serialized NIR is smaller, and also to let us detect more
2195 * isomorphic shaders when hashing, increasing cache hits.
2196 */
2197 struct blob blob;
2198 blob_init(&blob);
2199 nir_serialize(&blob, nir, true);
2200 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2201 blob_finish(&blob);
2202 }
2203
2204 return ish;
2205 }
2206
2207 static struct iris_uncompiled_shader *
2208 iris_create_shader_state(struct pipe_context *ctx,
2209 const struct pipe_shader_state *state)
2210 {
2211 struct nir_shader *nir;
2212
2213 if (state->type == PIPE_SHADER_IR_TGSI)
2214 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2215 else
2216 nir = state->ir.nir;
2217
2218 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2219 }
2220
2221 static void *
2222 iris_create_vs_state(struct pipe_context *ctx,
2223 const struct pipe_shader_state *state)
2224 {
2225 struct iris_context *ice = (void *) ctx;
2226 struct iris_screen *screen = (void *) ctx->screen;
2227 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2228
2229 /* User clip planes */
2230 if (ish->nir->info.clip_distance_array_size == 0)
2231 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2232
2233 if (screen->precompile) {
2234 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2235
2236 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2237 iris_compile_vs(ice, ish, &key);
2238 }
2239
2240 return ish;
2241 }
2242
2243 static void *
2244 iris_create_tcs_state(struct pipe_context *ctx,
2245 const struct pipe_shader_state *state)
2246 {
2247 struct iris_context *ice = (void *) ctx;
2248 struct iris_screen *screen = (void *) ctx->screen;
2249 const struct brw_compiler *compiler = screen->compiler;
2250 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2251 struct shader_info *info = &ish->nir->info;
2252
2253 if (screen->precompile) {
2254 const unsigned _GL_TRIANGLES = 0x0004;
2255 struct iris_tcs_prog_key key = {
2256 KEY_ID(vue.base),
2257 // XXX: make sure the linker fills this out from the TES...
2258 .tes_primitive_mode =
2259 info->tess.primitive_mode ? info->tess.primitive_mode
2260 : _GL_TRIANGLES,
2261 .outputs_written = info->outputs_written,
2262 .patch_outputs_written = info->patch_outputs_written,
2263 };
2264
2265 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2266 * We don't have that information, so we randomly guess that the input
2267 * and output patches are the same size. This is a bad guess, but we
2268 * can't do much better.
2269 */
2270 if (compiler->use_tcs_8_patch)
2271 key.input_vertices = info->tess.tcs_vertices_out;
2272
2273 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2274 iris_compile_tcs(ice, ish, &key);
2275 }
2276
2277 return ish;
2278 }
2279
2280 static void *
2281 iris_create_tes_state(struct pipe_context *ctx,
2282 const struct pipe_shader_state *state)
2283 {
2284 struct iris_context *ice = (void *) ctx;
2285 struct iris_screen *screen = (void *) ctx->screen;
2286 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2287 struct shader_info *info = &ish->nir->info;
2288
2289 /* User clip planes */
2290 if (ish->nir->info.clip_distance_array_size == 0)
2291 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2292
2293 if (screen->precompile) {
2294 struct iris_tes_prog_key key = {
2295 KEY_ID(vue.base),
2296 // XXX: not ideal, need TCS output/TES input unification
2297 .inputs_read = info->inputs_read,
2298 .patch_inputs_read = info->patch_inputs_read,
2299 };
2300
2301 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2302 iris_compile_tes(ice, ish, &key);
2303 }
2304
2305 return ish;
2306 }
2307
2308 static void *
2309 iris_create_gs_state(struct pipe_context *ctx,
2310 const struct pipe_shader_state *state)
2311 {
2312 struct iris_context *ice = (void *) ctx;
2313 struct iris_screen *screen = (void *) ctx->screen;
2314 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2315
2316 /* User clip planes */
2317 if (ish->nir->info.clip_distance_array_size == 0)
2318 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2319
2320 if (screen->precompile) {
2321 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2322
2323 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2324 iris_compile_gs(ice, ish, &key);
2325 }
2326
2327 return ish;
2328 }
2329
2330 static void *
2331 iris_create_fs_state(struct pipe_context *ctx,
2332 const struct pipe_shader_state *state)
2333 {
2334 struct iris_context *ice = (void *) ctx;
2335 struct iris_screen *screen = (void *) ctx->screen;
2336 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2337 struct shader_info *info = &ish->nir->info;
2338
2339 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2340 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2341 (1ull << IRIS_NOS_RASTERIZER) |
2342 (1ull << IRIS_NOS_BLEND);
2343
2344 /* The program key needs the VUE map if there are > 16 inputs */
2345 if (util_bitcount64(ish->nir->info.inputs_read &
2346 BRW_FS_VARYING_INPUT_MASK) > 16) {
2347 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2348 }
2349
2350 if (screen->precompile) {
2351 const uint64_t color_outputs = info->outputs_written &
2352 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2353 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2354 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2355
2356 bool can_rearrange_varyings =
2357 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2358
2359 const struct gen_device_info *devinfo = &screen->devinfo;
2360 struct iris_fs_prog_key key = {
2361 KEY_ID(base),
2362 .nr_color_regions = util_bitcount(color_outputs),
2363 .coherent_fb_fetch = devinfo->gen >= 9,
2364 .input_slots_valid =
2365 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2366 };
2367
2368 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2369 iris_compile_fs(ice, ish, &key, NULL);
2370 }
2371
2372 return ish;
2373 }
2374
2375 static void *
2376 iris_create_compute_state(struct pipe_context *ctx,
2377 const struct pipe_compute_state *state)
2378 {
2379 struct iris_context *ice = (void *) ctx;
2380 struct iris_screen *screen = (void *) ctx->screen;
2381 const nir_shader_compiler_options *options =
2382 screen->compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions;
2383
2384 nir_shader *nir;
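/* Gallium may hand us NIR directly or as a serialized blob; deserialize
 * it in the latter case.
 */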
2385 switch (state->ir_type) {
2386 case PIPE_SHADER_IR_NIR:
2387 nir = (void *)state->prog;
2388 break;
2389
2390 case PIPE_SHADER_IR_NIR_SERIALIZED: {
2391 struct blob_reader reader;
2392 const struct pipe_binary_program_header *hdr = state->prog;
2393 blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
2394 nir = nir_deserialize(NULL, options, &reader);
2395 break;
2396 }
2397
2398 default:
2399 unreachable("Unsupported IR");
2400 }
2401
2402 struct iris_uncompiled_shader *ish =
2403 iris_create_uncompiled_shader(ctx, nir, NULL);
2404
2405 // XXX: disallow more than 64KB of shared variables
2406
2407 if (screen->precompile) {
2408 struct iris_cs_prog_key key = { KEY_ID(base) };
2409
2410 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2411 iris_compile_cs(ice, ish, &key);
2412 }
2413
2414 return ish;
2415 }
2416
2417 /**
2418 * The pipe->delete_[stage]_state() driver hooks.
2419 *
2420 * Frees the iris_uncompiled_shader.
2421 */
2422 static void
2423 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2424 {
2425 struct iris_uncompiled_shader *ish = state;
2426 struct iris_context *ice = (void *) ctx;
2427
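/* If the shader being deleted is currently bound, unbind it and flag the
 * stage's uncompiled-shader state as dirty.
 */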
2428 if (ice->shaders.uncompiled[stage] == ish) {
2429 ice->shaders.uncompiled[stage] = NULL;
2430 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2431 }
2432
2433 if (ish->const_data) {
2434 pipe_resource_reference(&ish->const_data, NULL);
2435 pipe_resource_reference(&ish->const_data_state.res, NULL);
2436 }
2437
2438 iris_delete_shader_variants(ice, ish);
2439
2440 ralloc_free(ish->nir);
2441 free(ish);
2442 }
2443
2444 static void
2445 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2446 {
2447 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2448 }
2449
2450 static void
2451 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2452 {
2453 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2454 }
2455
2456 static void
2457 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2458 {
2459 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2460 }
2461
2462 static void
2463 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2464 {
2465 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2466 }
2467
2468 static void
2469 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2470 {
2471 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2472 }
2473
2474 static void
2475 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2476 {
2477 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2478 }
2479
2480 /**
2481 * The pipe->bind_[stage]_state() driver hook.
2482 *
2483 * Binds an uncompiled shader as the current one for a particular stage.
2484 * Updates dirty tracking to account for the shader's NOS.
2485 */
2486 static void
2487 bind_shader_state(struct iris_context *ice,
2488 struct iris_uncompiled_shader *ish,
2489 gl_shader_stage stage)
2490 {
2491 uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2492 const uint64_t nos = ish ? ish->nos : 0;
2493
2494 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2495 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2496
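/* A change in the number of textures used requires re-uploading the
 * stage's sampler state table.
 */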
2497 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2498 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2499 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2500 }
2501
2502 ice->shaders.uncompiled[stage] = ish;
2503 ice->state.stage_dirty |= stage_dirty_bit;
2504
2505 /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they change
2506 * (or that they no longer need to do so).
2507 */
2508 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2509 if (nos & (1 << i))
2510 ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2511 else
2512 ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2513 }
2514 }
2515
2516 static void
2517 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2518 {
2519 struct iris_context *ice = (struct iris_context *)ctx;
2520 struct iris_uncompiled_shader *new_ish = state;
2521
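/* Switching between window-space and normal positions affects clipping,
 * rasterization, and viewport state, so flag those dirty.
 */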
2522 if (new_ish &&
2523 ice->state.window_space_position !=
2524 new_ish->nir->info.vs.window_space_position) {
2525 ice->state.window_space_position =
2526 new_ish->nir->info.vs.window_space_position;
2527
2528 ice->state.dirty |= IRIS_DIRTY_CLIP |
2529 IRIS_DIRTY_RASTER |
2530 IRIS_DIRTY_CC_VIEWPORT;
2531 }
2532
2533 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2534 }
2535
2536 static void
2537 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2538 {
2539 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2540 }
2541
2542 static void
2543 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2544 {
2545 struct iris_context *ice = (struct iris_context *)ctx;
2546
2547 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2548 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2549 ice->state.dirty |= IRIS_DIRTY_URB;
2550
2551 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2552 }
2553
2554 static void
2555 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2556 {
2557 struct iris_context *ice = (struct iris_context *)ctx;
2558
2559 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2560 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2561 ice->state.dirty |= IRIS_DIRTY_URB;
2562
2563 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2564 }
2565
2566 static void
2567 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2568 {
2569 struct iris_context *ice = (struct iris_context *) ctx;
2570 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2571 const struct gen_device_info *devinfo = &screen->devinfo;
2572 struct iris_uncompiled_shader *old_ish =
2573 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2574 struct iris_uncompiled_shader *new_ish = state;
2575
2576 const unsigned color_bits =
2577 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2578 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2579
2580 /* Fragment shader outputs influence HasWriteableRT */
2581 if (!old_ish || !new_ish ||
2582 (old_ish->nir->info.outputs_written & color_bits) !=
2583 (new_ish->nir->info.outputs_written & color_bits))
2584 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2585
2586 if (devinfo->gen == 8)
2587 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2588
2589 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2590 }
2591
2592 static void
2593 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2594 {
2595 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2596 }
2597
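/**
 * Install the create/delete/bind shader state driver hooks into the
 * pipe_context.
 */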
2598 void
2599 iris_init_program_functions(struct pipe_context *ctx)
2600 {
2601 ctx->create_vs_state = iris_create_vs_state;
2602 ctx->create_tcs_state = iris_create_tcs_state;
2603 ctx->create_tes_state = iris_create_tes_state;
2604 ctx->create_gs_state = iris_create_gs_state;
2605 ctx->create_fs_state = iris_create_fs_state;
2606 ctx->create_compute_state = iris_create_compute_state;
2607
2608 ctx->delete_vs_state = iris_delete_vs_state;
2609 ctx->delete_tcs_state = iris_delete_tcs_state;
2610 ctx->delete_tes_state = iris_delete_tes_state;
2611 ctx->delete_gs_state = iris_delete_gs_state;
2612 ctx->delete_fs_state = iris_delete_fs_state;
2613 ctx->delete_compute_state = iris_delete_cs_state;
2614
2615 ctx->bind_vs_state = iris_bind_vs_state;
2616 ctx->bind_tcs_state = iris_bind_tcs_state;
2617 ctx->bind_tes_state = iris_bind_tes_state;
2618 ctx->bind_gs_state = iris_bind_gs_state;
2619 ctx->bind_fs_state = iris_bind_fs_state;
2620 ctx->bind_compute_state = iris_bind_cs_state;
2621 }