1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
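/* Editor's note: the 0x688 above is the packed identity swizzle,
 * SWIZZLE_X | SWIZZLE_Y << 3 | SWIZZLE_Z << 6 | SWIZZLE_W << 9
 * (0 + 0x8 + 0x80 + 0x600), i.e. "don't swizzle" for every sampler.
 */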
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants; we've
71 * already lowered them in NIR and don't want it doing so again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 .ignore_sample_mask_out = !key->multisample_fbo,
129 };
130 }
131
132 static struct brw_cs_prog_key
133 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
134 const struct iris_cs_prog_key *key)
135 {
136 return (struct brw_cs_prog_key) {
137 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
138 };
139 }
140
141 static void *
142 upload_state(struct u_upload_mgr *uploader,
143 struct iris_state_ref *ref,
144 unsigned size,
145 unsigned alignment)
146 {
147 void *p = NULL;
148 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
149 return p;
150 }
151
152 void
153 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
154 struct pipe_shader_buffer *buf,
155 struct iris_state_ref *surf_state,
156 bool ssbo)
157 {
158 struct pipe_context *ctx = &ice->ctx;
159 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
160
161 void *map =
162 upload_state(ice->state.surface_uploader, surf_state,
163 screen->isl_dev.ss.size, 64);
164 if (unlikely(!map)) {
165 surf_state->res = NULL;
166 return;
167 }
168
169 struct iris_resource *res = (void *) buf->buffer;
170 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
171 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
172
173 isl_buffer_fill_state(&screen->isl_dev, map,
174 .address = res->bo->gtt_offset + res->offset +
175 buf->buffer_offset,
176 .size_B = buf->buffer_size - res->offset,
177 .format = ssbo ? ISL_FORMAT_RAW
178 : ISL_FORMAT_R32G32B32A32_FLOAT,
179 .swizzle = ISL_SWIZZLE_IDENTITY,
180 .stride_B = 1,
181 .mocs = iris_mocs(res->bo, &screen->isl_dev));
182 }
183
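/* Editor's note: an illustrative usage sketch, not part of the original
 * file.  A caller that has a constant buffer described by a
 * pipe_shader_buffer can build its SURFACE_STATE like so; the shs->constbuf
 * and shs->constbuf_surf_state names below are assumed per-stage state
 * fields, shown only for illustration:
 *
 *    struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
 *    iris_upload_ubo_ssbo_surf_state(ice, cbuf,
 *                                    &shs->constbuf_surf_state[index],
 *                                    false);
 *
 * Passing ssbo = false selects an R32G32B32A32_FLOAT view for UBO pulls;
 * passing true instead produces a raw (ISL_FORMAT_RAW) view for SSBO
 * access.
 */
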
184 static nir_ssa_def *
185 get_aoa_deref_offset(nir_builder *b,
186 nir_deref_instr *deref,
187 unsigned elem_size)
188 {
189 unsigned array_size = elem_size;
190 nir_ssa_def *offset = nir_imm_int(b, 0);
191
192 while (deref->deref_type != nir_deref_type_var) {
193 assert(deref->deref_type == nir_deref_type_array);
194
195 /* This level's element size is the previous level's array size */
196 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
197 assert(deref->arr.index.ssa);
198 offset = nir_iadd(b, offset,
199 nir_imul(b, index, nir_imm_int(b, array_size)));
200
201 deref = nir_deref_instr_parent(deref);
202 assert(glsl_type_is_array(deref->type));
203 array_size *= glsl_get_length(deref->type);
204 }
205
206 /* Accessing an invalid surface index with the dataport can result in a
207 * hang. According to the spec "if the index used to select an individual
208 * element is negative or greater than or equal to the size of the array,
209 * the results of the operation are undefined but may not lead to
210 * termination" -- which is one of the possible outcomes of the hang.
211 * Clamp the index to prevent access outside of the array bounds.
212 */
213 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
214 }
215
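/* Editor's note: a worked example of the loop above, for illustration
 * only.  For a declaration "image2D imgs[3][2]" accessed as imgs[i][j]
 * with elem_size == 1, the walk starts at the innermost index:
 *
 *    level j: array_size == 1  ->  offset = j,         array_size becomes 2
 *    level i: array_size == 2  ->  offset = j + i * 2, array_size becomes 6
 *
 * so the function returns umin(j + i * 2, 6 - 1): the flattened element
 * index, clamped to the last valid element as described above.
 */
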
216 static void
217 iris_lower_storage_image_derefs(nir_shader *nir)
218 {
219 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
220
221 nir_builder b;
222 nir_builder_init(&b, impl);
223
224 nir_foreach_block(block, impl) {
225 nir_foreach_instr_safe(instr, block) {
226 if (instr->type != nir_instr_type_intrinsic)
227 continue;
228
229 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_image_deref_load:
232 case nir_intrinsic_image_deref_store:
233 case nir_intrinsic_image_deref_atomic_add:
234 case nir_intrinsic_image_deref_atomic_imin:
235 case nir_intrinsic_image_deref_atomic_umin:
236 case nir_intrinsic_image_deref_atomic_imax:
237 case nir_intrinsic_image_deref_atomic_umax:
238 case nir_intrinsic_image_deref_atomic_and:
239 case nir_intrinsic_image_deref_atomic_or:
240 case nir_intrinsic_image_deref_atomic_xor:
241 case nir_intrinsic_image_deref_atomic_exchange:
242 case nir_intrinsic_image_deref_atomic_comp_swap:
243 case nir_intrinsic_image_deref_size:
244 case nir_intrinsic_image_deref_samples:
245 case nir_intrinsic_image_deref_load_raw_intel:
246 case nir_intrinsic_image_deref_store_raw_intel: {
247 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
248 nir_variable *var = nir_deref_instr_get_variable(deref);
249
250 b.cursor = nir_before_instr(&intrin->instr);
251 nir_ssa_def *index =
252 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
253 get_aoa_deref_offset(&b, deref, 1));
254 nir_rewrite_image_intrinsic(intrin, index, false);
255 break;
256 }
257
258 default:
259 break;
260 }
261 }
262 }
263 }
264
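/* Editor's note: an illustrative before/after for the pass above, not part
 * of the original file.  Given "writeonly image2D imgs[4]" with
 * var->data.driver_location == 3, a store through imgs[i] is rewritten
 * roughly as:
 *
 *    before:  image_deref_store(&imgs[i], coord, sample, value)
 *    after:   image_store(3 + umin(i, 3), coord, sample, value)
 *
 * i.e. the variable deref becomes a flat image index, clamped to the array
 * bounds by get_aoa_deref_offset().
 */
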
265 /**
266 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
267 */
268 static bool
269 iris_fix_edge_flags(nir_shader *nir)
270 {
271 if (nir->info.stage != MESA_SHADER_VERTEX) {
272 nir_shader_preserve_all_metadata(nir);
273 return false;
274 }
275
276 nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
277 VARYING_SLOT_EDGE);
278 if (!var) {
279 nir_shader_preserve_all_metadata(nir);
280 return false;
281 }
282
283 var->data.mode = nir_var_shader_temp;
284 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
285 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
286 nir_fixup_deref_modes(nir);
287
288 nir_foreach_function(f, nir) {
289 if (f->impl) {
290 nir_metadata_preserve(f->impl, nir_metadata_block_index |
291 nir_metadata_dominance |
292 nir_metadata_live_ssa_defs |
293 nir_metadata_loop_analysis);
294 } else {
295 nir_metadata_preserve(f->impl, nir_metadata_all);
296 }
297 }
298
299 return true;
300 }
301
302 /**
303 * Fix an uncompiled shader's stream output info.
304 *
305 * Core Gallium stores output->register_index as a "slot" number, where
306 * slots are assigned consecutively to all outputs in info->outputs_written.
307 * This naive packing of outputs doesn't work for us - we too have slots,
308 * but the layout is defined by the VUE map, which we won't have until we
309 * compile a specific shader variant. So, we remap these and simply store
310 * VARYING_SLOT_* in our copy's output->register_index fields.
311 *
312 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
313 * components of our VUE header. See brw_vue_map.c for the layout.
314 */
315 static void
316 update_so_info(struct pipe_stream_output_info *so_info,
317 uint64_t outputs_written)
318 {
319 uint8_t reverse_map[64] = {};
320 unsigned slot = 0;
321 while (outputs_written) {
322 reverse_map[slot++] = u_bit_scan64(&outputs_written);
323 }
324
325 for (unsigned i = 0; i < so_info->num_outputs; i++) {
326 struct pipe_stream_output *output = &so_info->output[i];
327
328 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
329 output->register_index = reverse_map[output->register_index];
330
331 /* The VUE header contains three scalar fields packed together:
332 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
333 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
334 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
335 */
336 switch (output->register_index) {
337 case VARYING_SLOT_LAYER:
338 assert(output->num_components == 1);
339 output->register_index = VARYING_SLOT_PSIZ;
340 output->start_component = 1;
341 break;
342 case VARYING_SLOT_VIEWPORT:
343 assert(output->num_components == 1);
344 output->register_index = VARYING_SLOT_PSIZ;
345 output->start_component = 2;
346 break;
347 case VARYING_SLOT_PSIZ:
348 assert(output->num_components == 1);
349 output->start_component = 3;
350 break;
351 }
352
353 //info->outputs_written |= 1ull << output->register_index;
354 }
355 }
356
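/* Editor's note: a worked example of the remapping above, for illustration
 * only.  Suppose outputs_written contains exactly VARYING_SLOT_POS,
 * VARYING_SLOT_COL0 and VARYING_SLOT_PSIZ.  Gallium numbers those outputs
 * with condensed slots 0, 1 and 2 in bit order, so reverse_map becomes:
 *
 *    reverse_map[0] = VARYING_SLOT_POS
 *    reverse_map[1] = VARYING_SLOT_COL0
 *    reverse_map[2] = VARYING_SLOT_PSIZ
 *
 * A stream output recorded against slot 2 (gl_PointSize) is then rewritten
 * to register_index = VARYING_SLOT_PSIZ with start_component = 3, matching
 * its location in the VUE header.
 */
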
357 static void
358 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
359 unsigned offset, unsigned n)
360 {
361 assert(offset % sizeof(uint32_t) == 0);
362
363 for (unsigned i = 0; i < n; ++i)
364 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
365
366 for (unsigned i = n; i < 4; ++i)
367 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
368 }
369
370 /**
371 * Associate NIR uniform variables with the prog_data->param[] mechanism
372 * used by the backend. Also, decide which UBOs we'd like to push in an
373 * ideal situation (though the backend can reduce this).
374 */
375 static void
376 iris_setup_uniforms(const struct brw_compiler *compiler,
377 void *mem_ctx,
378 nir_shader *nir,
379 struct brw_stage_prog_data *prog_data,
380 unsigned kernel_input_size,
381 enum brw_param_builtin **out_system_values,
382 unsigned *out_num_system_values,
383 unsigned *out_num_cbufs)
384 {
385 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
386
387 unsigned system_values_start = ALIGN(kernel_input_size, sizeof(uint32_t));
388
389 const unsigned IRIS_MAX_SYSTEM_VALUES =
390 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
391 enum brw_param_builtin *system_values =
392 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
393 unsigned num_system_values = 0;
394
395 unsigned patch_vert_idx = -1;
396 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
397 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
398 unsigned variable_group_size_idx = -1;
399 memset(ucp_idx, -1, sizeof(ucp_idx));
400 memset(img_idx, -1, sizeof(img_idx));
401
402 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
403
404 nir_builder b;
405 nir_builder_init(&b, impl);
406
407 b.cursor = nir_before_block(nir_start_block(impl));
408 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
409
410 /* Turn system value intrinsics into uniforms */
411 nir_foreach_block(block, impl) {
412 nir_foreach_instr_safe(instr, block) {
413 if (instr->type != nir_instr_type_intrinsic)
414 continue;
415
416 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
417 nir_ssa_def *offset;
418
419 switch (intrin->intrinsic) {
420 case nir_intrinsic_load_constant: {
421 unsigned load_size = intrin->dest.ssa.num_components *
422 intrin->dest.ssa.bit_size / 8;
423 unsigned load_align = intrin->dest.ssa.bit_size / 8;
424
425 /* This one is special because it reads from the shader constant
426 * data and not cbuf0 which gallium uploads for us.
427 */
428 b.cursor = nir_instr_remove(&intrin->instr);
429
430 nir_ssa_def *offset =
431 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
432 nir_intrinsic_base(intrin));
433
434 assert(load_size < b.shader->constant_data_size);
435 unsigned max_offset = b.shader->constant_data_size - load_size;
436 offset = nir_umin(&b, offset, nir_imm_int(&b, max_offset));
437
438 nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(&b,
439 nir_load_reloc_const_intel(&b, IRIS_SHADER_RELOC_CONST_DATA_ADDR_LOW),
440 nir_load_reloc_const_intel(&b, IRIS_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
441
442 nir_ssa_def *data =
443 nir_load_global(&b, nir_iadd(&b, const_data_base_addr,
444 nir_u2u64(&b, offset)),
445 load_align,
446 intrin->dest.ssa.num_components,
447 intrin->dest.ssa.bit_size);
448
449 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
450 nir_src_for_ssa(data));
451 continue;
452 }
453 case nir_intrinsic_load_user_clip_plane: {
454 unsigned ucp = nir_intrinsic_ucp_id(intrin);
455
456 if (ucp_idx[ucp] == -1) {
457 ucp_idx[ucp] = num_system_values;
458 num_system_values += 4;
459 }
460
461 for (int i = 0; i < 4; i++) {
462 system_values[ucp_idx[ucp] + i] =
463 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
464 }
465
466 b.cursor = nir_before_instr(instr);
467 offset = nir_imm_int(&b, system_values_start +
468 ucp_idx[ucp] * sizeof(uint32_t));
469 break;
470 }
471 case nir_intrinsic_load_patch_vertices_in:
472 if (patch_vert_idx == -1)
473 patch_vert_idx = num_system_values++;
474
475 system_values[patch_vert_idx] =
476 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
477
478 b.cursor = nir_before_instr(instr);
479 offset = nir_imm_int(&b, system_values_start +
480 patch_vert_idx * sizeof(uint32_t));
481 break;
482 case nir_intrinsic_image_deref_load_param_intel: {
483 assert(devinfo->gen < 9);
484 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
485 nir_variable *var = nir_deref_instr_get_variable(deref);
486
487 if (img_idx[var->data.binding] == -1) {
488 /* GL only allows arrays of arrays of images. */
489 assert(glsl_type_is_image(glsl_without_array(var->type)));
490 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
491
492 for (int i = 0; i < num_images; i++) {
493 const unsigned img = var->data.binding + i;
494
495 img_idx[img] = num_system_values;
496 num_system_values += BRW_IMAGE_PARAM_SIZE;
497
498 uint32_t *img_sv = &system_values[img_idx[img]];
499
500 setup_vec4_image_sysval(
501 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
502 offsetof(struct brw_image_param, offset), 2);
503 setup_vec4_image_sysval(
504 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
505 offsetof(struct brw_image_param, size), 3);
506 setup_vec4_image_sysval(
507 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
508 offsetof(struct brw_image_param, stride), 4);
509 setup_vec4_image_sysval(
510 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
511 offsetof(struct brw_image_param, tiling), 3);
512 setup_vec4_image_sysval(
513 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
514 offsetof(struct brw_image_param, swizzling), 2);
515 }
516 }
517
518 b.cursor = nir_before_instr(instr);
519 offset = nir_iadd(&b,
520 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
521 nir_imm_int(&b, system_values_start +
522 img_idx[var->data.binding] * 4 +
523 nir_intrinsic_base(intrin) * 16));
524 break;
525 }
526 case nir_intrinsic_load_local_group_size: {
527 assert(nir->info.cs.local_size_variable);
528 if (variable_group_size_idx == -1) {
529 variable_group_size_idx = num_system_values;
530 num_system_values += 3;
531 for (int i = 0; i < 3; i++) {
532 system_values[variable_group_size_idx + i] =
533 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
534 }
535 }
536
537 b.cursor = nir_before_instr(instr);
538 offset = nir_imm_int(&b, system_values_start +
539 variable_group_size_idx * sizeof(uint32_t));
540 break;
541 }
542 case nir_intrinsic_load_kernel_input: {
543 assert(nir_intrinsic_base(intrin) +
544 nir_intrinsic_range(intrin) <= kernel_input_size);
545 b.cursor = nir_before_instr(instr);
546 offset = nir_iadd_imm(&b, intrin->src[0].ssa,
547 nir_intrinsic_base(intrin));
548 break;
549 }
550 default:
551 continue;
552 }
553
554 nir_intrinsic_instr *load =
555 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
556 load->num_components = intrin->dest.ssa.num_components;
557 load->src[0] = nir_src_for_ssa(temp_ubo_name);
558 load->src[1] = nir_src_for_ssa(offset);
559 nir_intrinsic_set_align(load, 4, 0);
560 nir_ssa_dest_init(&load->instr, &load->dest,
561 intrin->dest.ssa.num_components,
562 intrin->dest.ssa.bit_size, NULL);
563 nir_builder_instr_insert(&b, &load->instr);
564 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
565 nir_src_for_ssa(&load->dest.ssa));
566 nir_instr_remove(instr);
567 }
568 }
569
570 nir_validate_shader(nir, "before remapping");
571
572 /* Uniforms are stored in constant buffer 0, so the user-facing
573 * UBOs start at index 1. If any constant buffer is needed at all,
574 * constant buffer 0 will be needed too, so account for it.
575 */
576 unsigned num_cbufs = nir->info.num_ubos;
577 if (num_cbufs || nir->num_uniforms)
578 num_cbufs++;
579
580 /* Place the new params in a new cbuf. */
581 if (num_system_values > 0 || kernel_input_size > 0) {
582 unsigned sysval_cbuf_index = num_cbufs;
583 num_cbufs++;
584
585 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
586 num_system_values);
587
588 nir_foreach_block(block, impl) {
589 nir_foreach_instr_safe(instr, block) {
590 if (instr->type != nir_instr_type_intrinsic)
591 continue;
592
593 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
594
595 if (load->intrinsic != nir_intrinsic_load_ubo)
596 continue;
597
598 b.cursor = nir_before_instr(instr);
599
600 assert(load->src[0].is_ssa);
601
602 if (load->src[0].ssa == temp_ubo_name) {
603 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
604 nir_instr_rewrite_src(instr, &load->src[0],
605 nir_src_for_ssa(imm));
606 }
607 }
608 }
609
610 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
611 nir_opt_constant_folding(nir);
612 } else {
613 ralloc_free(system_values);
614 system_values = NULL;
615 }
616
617 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
618 nir_validate_shader(nir, "after remap");
619
620 /* We don't use params[], but gallium leaves num_uniforms set. We used it
621 * above to detect whether cbuf0 exists, but we no longer need it by the
622 * time we get here. Zero it out so that the back-end doesn't get confused
623 * when num_uniforms != nr_params * 4.
624 */
625 nir->num_uniforms = 0;
626
627 *out_system_values = system_values;
628 *out_num_system_values = num_system_values;
629 *out_num_cbufs = num_cbufs;
630 }
631
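/* Editor's note: an illustrative sketch of what iris_setup_uniforms()
 * produces, not part of the original file.  For a vertex shader that reads
 * two user clip planes and nothing else, the first pass records eight
 * system values:
 *
 *    system_values[0..3] = BRW_PARAM_BUILTIN_CLIP_PLANE(0, 0..3)
 *    system_values[4..7] = BRW_PARAM_BUILTIN_CLIP_PLANE(1, 0..3)
 *
 * and replaces each load_user_clip_plane with a load_ubo from the extra
 * "system values" constant buffer at byte offset ucp_idx[ucp] * 4 (plus
 * kernel_input_size, which is zero for graphics shaders).  The second pass
 * then patches the placeholder UBO index (temp_ubo_name) to the real
 * sysval_cbuf_index.
 */
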
632 static const char *surface_group_names[] = {
633 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
634 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
635 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
636 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
637 [IRIS_SURFACE_GROUP_UBO] = "ubo",
638 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
639 [IRIS_SURFACE_GROUP_IMAGE] = "image",
640 };
641
642 static void
643 iris_print_binding_table(FILE *fp, const char *name,
644 const struct iris_binding_table *bt)
645 {
646 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
647
648 uint32_t total = 0;
649 uint32_t compacted = 0;
650
651 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
652 uint32_t size = bt->sizes[i];
653 total += size;
654 if (size)
655 compacted += util_bitcount64(bt->used_mask[i]);
656 }
657
658 if (total == 0) {
659 fprintf(fp, "Binding table for %s is empty\n\n", name);
660 return;
661 }
662
663 if (total != compacted) {
664 fprintf(fp, "Binding table for %s "
665 "(compacted to %u entries from %u entries)\n",
666 name, compacted, total);
667 } else {
668 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
669 }
670
671 uint32_t entry = 0;
672 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
673 uint64_t mask = bt->used_mask[i];
674 while (mask) {
675 int index = u_bit_scan64(&mask);
676 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
677 }
678 }
679 fprintf(fp, "\n");
680 }
681
682 enum {
683 /* Max elements in a surface group. */
684 SURFACE_GROUP_MAX_ELEMENTS = 64,
685 };
686
687 /**
688 * Map a <group, index> pair to a binding table index.
689 *
690 * For example: <UBO, 5> => binding table index 12
691 */
692 uint32_t
693 iris_group_index_to_bti(const struct iris_binding_table *bt,
694 enum iris_surface_group group, uint32_t index)
695 {
696 assert(index < bt->sizes[group]);
697 uint64_t mask = bt->used_mask[group];
698 uint64_t bit = 1ull << index;
699 if (bit & mask) {
700 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
701 } else {
702 return IRIS_SURFACE_NOT_USED;
703 }
704 }
705
706 /**
707 * Map a binding table index back to a <group, index> pair.
708 *
709 * For example: binding table index 12 => <UBO, 5>
710 */
711 uint32_t
712 iris_bti_to_group_index(const struct iris_binding_table *bt,
713 enum iris_surface_group group, uint32_t bti)
714 {
715 uint64_t used_mask = bt->used_mask[group];
716 assert(bti >= bt->offsets[group]);
717
718 uint32_t c = bti - bt->offsets[group];
719 while (used_mask) {
720 int i = u_bit_scan64(&used_mask);
721 if (c == 0)
722 return i;
723 c--;
724 }
725
726 return IRIS_SURFACE_NOT_USED;
727 }
728
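/* Editor's note: a worked example of the two mappings above, for
 * illustration only.  Suppose offsets[UBO] == 7 and
 * used_mask[UBO] == 0b101001 (group indices 0, 3 and 5 in use).  Then:
 *
 *    iris_group_index_to_bti(bt, UBO, 5):
 *       bit = 1 << 5; popcount(0b011111 & 0b101001) == 2  =>  7 + 2 = 9
 *
 *    iris_bti_to_group_index(bt, UBO, 9):
 *       c = 9 - 7 = 2; the set bits are 0, 3, 5, and the third one
 *       (c == 2) is 5  =>  returns 5
 *
 * A group index whose bit is clear (e.g. 2) maps to IRIS_SURFACE_NOT_USED.
 */
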
729 static void
730 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
731 nir_instr *instr, nir_src *src,
732 enum iris_surface_group group)
733 {
734 assert(bt->sizes[group] > 0);
735
736 b->cursor = nir_before_instr(instr);
737 nir_ssa_def *bti;
738 if (nir_src_is_const(*src)) {
739 uint32_t index = nir_src_as_uint(*src);
740 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
741 src->ssa->bit_size);
742 } else {
743 /* Indirect usage makes all the surfaces of the group available,
744 * so we can just add the base.
745 */
746 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
747 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
748 }
749 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
750 }
751
752 static void
753 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
754 enum iris_surface_group group)
755 {
756 assert(bt->sizes[group] > 0);
757
758 if (nir_src_is_const(*src)) {
759 uint64_t index = nir_src_as_uint(*src);
760 assert(index < bt->sizes[group]);
761 bt->used_mask[group] |= 1ull << index;
762 } else {
763 /* There's an indirect usage, we need all the surfaces. */
764 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
765 }
766 }
767
768 static bool
769 skip_compacting_binding_tables(void)
770 {
771 static int skip = -1;
772 if (skip < 0)
773 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
774 return skip;
775 }
776
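/* Editor's note: usage hint, not part of the original file.  Binding table
 * compaction can be disabled for debugging by setting the environment
 * variable checked above before launching the application, e.g.
 *
 *    INTEL_DISABLE_COMPACT_BINDING_TABLE=true ./my_gl_app
 *
 * in which case every slot of every surface group is treated as used (see
 * the "When compaction is disabled" path in iris_setup_binding_table()).
 */
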
777 /**
778 * Set up the binding table indices and apply to the shader.
779 */
780 static void
781 iris_setup_binding_table(const struct gen_device_info *devinfo,
782 struct nir_shader *nir,
783 struct iris_binding_table *bt,
784 unsigned num_render_targets,
785 unsigned num_system_values,
786 unsigned num_cbufs)
787 {
788 const struct shader_info *info = &nir->info;
789
790 memset(bt, 0, sizeof(*bt));
791
792 /* Set the sizes for each surface group. For some groups, we already know
793 * upfront how many will be used, so mark them.
794 */
795 if (info->stage == MESA_SHADER_FRAGMENT) {
796 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
797 /* All render targets used. */
798 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
799 BITFIELD64_MASK(num_render_targets);
800
801 /* Set up the render target read surface group in order to support
802 * non-coherent framebuffer fetch on Gen8.
803 */
804 if (devinfo->gen == 8 && info->outputs_read) {
805 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
806 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
807 BITFIELD64_MASK(num_render_targets);
808 }
809 } else if (info->stage == MESA_SHADER_COMPUTE) {
810 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
811 }
812
813 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
814 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
815
816 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
817
818 /* Allocate an extra slot in the UBO section for NIR constants.
819 * Binding table compaction will remove it if unnecessary.
820 *
821 * We don't include them in iris_compiled_shader::num_cbufs because
822 * they are uploaded separately from shs->constbuf[], but from a shader
823 * point of view, they're another UBO (at the end of the section).
824 */
825 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
826
827 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
828
829 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
830 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
831
832 /* Mark surfaces used for the cases we don't have the information available
833 * upfront.
834 */
835 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
836 nir_foreach_block (block, impl) {
837 nir_foreach_instr (instr, block) {
838 if (instr->type != nir_instr_type_intrinsic)
839 continue;
840
841 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
842 switch (intrin->intrinsic) {
843 case nir_intrinsic_load_num_work_groups:
844 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
845 break;
846
847 case nir_intrinsic_load_output:
848 if (devinfo->gen == 8) {
849 mark_used_with_src(bt, &intrin->src[0],
850 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
851 }
852 break;
853
854 case nir_intrinsic_image_size:
855 case nir_intrinsic_image_load:
856 case nir_intrinsic_image_store:
857 case nir_intrinsic_image_atomic_add:
858 case nir_intrinsic_image_atomic_imin:
859 case nir_intrinsic_image_atomic_umin:
860 case nir_intrinsic_image_atomic_imax:
861 case nir_intrinsic_image_atomic_umax:
862 case nir_intrinsic_image_atomic_and:
863 case nir_intrinsic_image_atomic_or:
864 case nir_intrinsic_image_atomic_xor:
865 case nir_intrinsic_image_atomic_exchange:
866 case nir_intrinsic_image_atomic_comp_swap:
867 case nir_intrinsic_image_load_raw_intel:
868 case nir_intrinsic_image_store_raw_intel:
869 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
870 break;
871
872 case nir_intrinsic_load_ubo:
873 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
874 break;
875
876 case nir_intrinsic_store_ssbo:
877 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
878 break;
879
880 case nir_intrinsic_get_buffer_size:
881 case nir_intrinsic_ssbo_atomic_add:
882 case nir_intrinsic_ssbo_atomic_imin:
883 case nir_intrinsic_ssbo_atomic_umin:
884 case nir_intrinsic_ssbo_atomic_imax:
885 case nir_intrinsic_ssbo_atomic_umax:
886 case nir_intrinsic_ssbo_atomic_and:
887 case nir_intrinsic_ssbo_atomic_or:
888 case nir_intrinsic_ssbo_atomic_xor:
889 case nir_intrinsic_ssbo_atomic_exchange:
890 case nir_intrinsic_ssbo_atomic_comp_swap:
891 case nir_intrinsic_ssbo_atomic_fmin:
892 case nir_intrinsic_ssbo_atomic_fmax:
893 case nir_intrinsic_ssbo_atomic_fcomp_swap:
894 case nir_intrinsic_load_ssbo:
895 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
896 break;
897
898 default:
899 break;
900 }
901 }
902 }
903
904 /* When compaction is disabled we just mark everything as used. */
905 if (unlikely(skip_compacting_binding_tables())) {
906 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
907 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
908 }
909
910 /* Calculate the offsets and the binding table size based on the used
911 * surfaces. After this point, the functions to go between "group indices"
912 * and binding table indices can be used.
913 */
914 uint32_t next = 0;
915 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
916 if (bt->used_mask[i] != 0) {
917 bt->offsets[i] = next;
918 next += util_bitcount64(bt->used_mask[i]);
919 }
920 }
921 bt->size_bytes = next * 4;
922
923 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
924 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
925 }
926
927 /* Apply the binding table indices. The backend compiler is not expected
928 * to change those, as we haven't set any of the *_start entries in brw
929 * binding_table.
930 */
931 nir_builder b;
932 nir_builder_init(&b, impl);
933
934 nir_foreach_block (block, impl) {
935 nir_foreach_instr (instr, block) {
936 if (instr->type == nir_instr_type_tex) {
937 nir_tex_instr *tex = nir_instr_as_tex(instr);
938 tex->texture_index =
939 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
940 tex->texture_index);
941 continue;
942 }
943
944 if (instr->type != nir_instr_type_intrinsic)
945 continue;
946
947 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
948 switch (intrin->intrinsic) {
949 case nir_intrinsic_image_size:
950 case nir_intrinsic_image_load:
951 case nir_intrinsic_image_store:
952 case nir_intrinsic_image_atomic_add:
953 case nir_intrinsic_image_atomic_imin:
954 case nir_intrinsic_image_atomic_umin:
955 case nir_intrinsic_image_atomic_imax:
956 case nir_intrinsic_image_atomic_umax:
957 case nir_intrinsic_image_atomic_and:
958 case nir_intrinsic_image_atomic_or:
959 case nir_intrinsic_image_atomic_xor:
960 case nir_intrinsic_image_atomic_exchange:
961 case nir_intrinsic_image_atomic_comp_swap:
962 case nir_intrinsic_image_load_raw_intel:
963 case nir_intrinsic_image_store_raw_intel:
964 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
965 IRIS_SURFACE_GROUP_IMAGE);
966 break;
967
968 case nir_intrinsic_load_ubo:
969 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
970 IRIS_SURFACE_GROUP_UBO);
971 break;
972
973 case nir_intrinsic_store_ssbo:
974 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
975 IRIS_SURFACE_GROUP_SSBO);
976 break;
977
978 case nir_intrinsic_load_output:
979 if (devinfo->gen == 8) {
980 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
981 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
982 }
983 break;
984
985 case nir_intrinsic_get_buffer_size:
986 case nir_intrinsic_ssbo_atomic_add:
987 case nir_intrinsic_ssbo_atomic_imin:
988 case nir_intrinsic_ssbo_atomic_umin:
989 case nir_intrinsic_ssbo_atomic_imax:
990 case nir_intrinsic_ssbo_atomic_umax:
991 case nir_intrinsic_ssbo_atomic_and:
992 case nir_intrinsic_ssbo_atomic_or:
993 case nir_intrinsic_ssbo_atomic_xor:
994 case nir_intrinsic_ssbo_atomic_exchange:
995 case nir_intrinsic_ssbo_atomic_comp_swap:
996 case nir_intrinsic_ssbo_atomic_fmin:
997 case nir_intrinsic_ssbo_atomic_fmax:
998 case nir_intrinsic_ssbo_atomic_fcomp_swap:
999 case nir_intrinsic_load_ssbo:
1000 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1001 IRIS_SURFACE_GROUP_SSBO);
1002 break;
1003
1004 default:
1005 break;
1006 }
1007 }
1008 }
1009 }
1010
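/* Editor's note: a worked example of the layout computed above, for
 * illustration only.  Take a fragment shader with two render targets, one
 * texture, and three constant buffers of which only UBO 0 and UBO 2 are
 * accessed with constant indices:
 *
 *    group            size   used_mask   offset
 *    RENDER_TARGET      2      0b11         0
 *    TEXTURE            1      0b1          2
 *    UBO                4      0b101        3     (3 cbufs + 1 spare slot
 *                                                  for NIR constants)
 *
 * This yields five binding table entries (size_bytes == 20), and UBO group
 * index 2 lands at binding table index 3 + popcount(0b011 & 0b101) == 4.
 */
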
1011 static void
1012 iris_debug_recompile(struct iris_context *ice,
1013 struct shader_info *info,
1014 const struct brw_base_prog_key *key)
1015 {
1016 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1017 const struct gen_device_info *devinfo = &screen->devinfo;
1018 const struct brw_compiler *c = screen->compiler;
1019
1020 if (!info)
1021 return;
1022
1023 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1024 _mesa_shader_stage_to_string(info->stage),
1025 info->name ? info->name : "(no identifier)",
1026 info->label ? info->label : "");
1027
1028 const void *old_iris_key =
1029 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1030
1031 union brw_any_prog_key old_key;
1032
1033 switch (info->stage) {
1034 case MESA_SHADER_VERTEX:
1035 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1036 break;
1037 case MESA_SHADER_TESS_CTRL:
1038 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1039 break;
1040 case MESA_SHADER_TESS_EVAL:
1041 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1042 break;
1043 case MESA_SHADER_GEOMETRY:
1044 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1045 break;
1046 case MESA_SHADER_FRAGMENT:
1047 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1048 break;
1049 case MESA_SHADER_COMPUTE:
1050 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1051 break;
1052 default:
1053 unreachable("invalid shader stage");
1054 }
1055
1056 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1057 }
1058
1059 /**
1060 * Get the shader for the last enabled geometry stage.
1061 *
1062 * This stage is the one which will feed stream output and the rasterizer.
1063 */
1064 static gl_shader_stage
1065 last_vue_stage(struct iris_context *ice)
1066 {
1067 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1068 return MESA_SHADER_GEOMETRY;
1069
1070 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1071 return MESA_SHADER_TESS_EVAL;
1072
1073 return MESA_SHADER_VERTEX;
1074 }
1075
1076 /**
1077 * Compile a vertex shader, and upload the assembly.
1078 */
1079 static struct iris_compiled_shader *
1080 iris_compile_vs(struct iris_context *ice,
1081 struct iris_uncompiled_shader *ish,
1082 const struct iris_vs_prog_key *key)
1083 {
1084 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1085 const struct brw_compiler *compiler = screen->compiler;
1086 const struct gen_device_info *devinfo = &screen->devinfo;
1087 void *mem_ctx = ralloc_context(NULL);
1088 struct brw_vs_prog_data *vs_prog_data =
1089 rzalloc(mem_ctx, struct brw_vs_prog_data);
1090 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1091 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1092 enum brw_param_builtin *system_values;
1093 unsigned num_system_values;
1094 unsigned num_cbufs;
1095
1096 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1097
1098 if (key->vue.nr_userclip_plane_consts) {
1099 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1100 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1101 true, false, NULL);
1102 nir_lower_io_to_temporaries(nir, impl, true, false);
1103 nir_lower_global_vars_to_local(nir);
1104 nir_lower_vars_to_ssa(nir);
1105 nir_shader_gather_info(nir, impl);
1106 }
1107
1108 prog_data->use_alt_mode = ish->use_alt_mode;
1109
1110 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1111 &num_system_values, &num_cbufs);
1112
1113 struct iris_binding_table bt;
1114 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1115 num_system_values, num_cbufs);
1116
1117 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1118
1119 brw_compute_vue_map(devinfo,
1120 &vue_prog_data->vue_map, nir->info.outputs_written,
1121 nir->info.separate_shader, /* pos_slots */ 1);
1122
1123 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1124
1125 char *error_str = NULL;
1126 const unsigned *program =
1127 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1128 nir, -1, NULL, &error_str);
1129 if (program == NULL) {
1130 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1131 ralloc_free(mem_ctx);
1132 return NULL;
1133 }
1134
1135 if (ish->compiled_once) {
1136 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1137 } else {
1138 ish->compiled_once = true;
1139 }
1140
1141 uint32_t *so_decls =
1142 screen->vtbl.create_so_decl_list(&ish->stream_output,
1143 &vue_prog_data->vue_map);
1144
1145 struct iris_compiled_shader *shader =
1146 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1147 prog_data, so_decls, system_values, num_system_values,
1148 0, num_cbufs, &bt);
1149
1150 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1151
1152 ralloc_free(mem_ctx);
1153 return shader;
1154 }
1155
1156 /**
1157 * Update the current vertex shader variant.
1158 *
1159 * Fill out the key, look in the cache, compile and bind if needed.
1160 */
1161 static void
1162 iris_update_compiled_vs(struct iris_context *ice)
1163 {
1164 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1165 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1166 struct iris_uncompiled_shader *ish =
1167 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1168
1169 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1170 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1171
1172 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1173 struct iris_compiled_shader *shader =
1174 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1175
1176 if (!shader)
1177 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1178
1179 if (!shader)
1180 shader = iris_compile_vs(ice, ish, &key);
1181
1182 if (old != shader) {
1183 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1184 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1185 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1186 IRIS_STAGE_DIRTY_BINDINGS_VS |
1187 IRIS_STAGE_DIRTY_CONSTANTS_VS;
1188 shs->sysvals_need_upload = true;
1189
1190 const struct brw_vs_prog_data *vs_prog_data =
1191 (void *) shader->prog_data;
1192 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1193 vs_prog_data->uses_baseinstance;
1194 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1195 vs_prog_data->uses_is_indexed_draw;
1196 const bool needs_sgvs_element = uses_draw_params ||
1197 vs_prog_data->uses_instanceid ||
1198 vs_prog_data->uses_vertexid;
1199
1200 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1201 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1202 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1203 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1204 IRIS_DIRTY_VERTEX_ELEMENTS;
1205 }
1206 ice->state.vs_uses_draw_params = uses_draw_params;
1207 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1208 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1209 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1210 }
1211 }
1212
1213 /**
1214 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1215 */
1216 const struct shader_info *
1217 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1218 {
1219 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1220
1221 if (!ish)
1222 return NULL;
1223
1224 const nir_shader *nir = ish->nir;
1225 return &nir->info;
1226 }
1227
1228 /**
1229 * Get the union of TCS output and TES input slots.
1230 *
1231 * TCS and TES need to agree on a common URB entry layout. In particular,
1232 * the data for all patch vertices is stored in a single URB entry (unlike
1233 * GS which has one entry per input vertex). This means that per-vertex
1234 * array indexing needs a stride.
1235 *
1236 * SSO requires locations to match, but doesn't require the number of
1237 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1238 * So, we need to take the extra step of unifying these on the fly.
1239 */
1240 static void
1241 get_unified_tess_slots(const struct iris_context *ice,
1242 uint64_t *per_vertex_slots,
1243 uint32_t *per_patch_slots)
1244 {
1245 const struct shader_info *tcs =
1246 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1247 const struct shader_info *tes =
1248 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1249
1250 *per_vertex_slots = tes->inputs_read;
1251 *per_patch_slots = tes->patch_inputs_read;
1252
1253 if (tcs) {
1254 *per_vertex_slots |= tcs->outputs_written;
1255 *per_patch_slots |= tcs->patch_outputs_written;
1256 }
1257 }
1258
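/* Editor's note: an illustrative example, not part of the original file.
 * With separate shader objects, a TCS might write 16 per-vertex slots and
 * two patch slots while the paired TES only reads 12 of those per-vertex
 * slots and one patch slot.  Taking the union above makes both stages lay
 * out their URB entries identically, so the per-vertex stride the TES uses
 * for indexing matches what the TCS actually wrote.
 */
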
1259 /**
1260 * Compile a tessellation control shader, and upload the assembly.
1261 */
1262 static struct iris_compiled_shader *
1263 iris_compile_tcs(struct iris_context *ice,
1264 struct iris_uncompiled_shader *ish,
1265 const struct iris_tcs_prog_key *key)
1266 {
1267 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1268 const struct brw_compiler *compiler = screen->compiler;
1269 const struct nir_shader_compiler_options *options =
1270 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1271 void *mem_ctx = ralloc_context(NULL);
1272 struct brw_tcs_prog_data *tcs_prog_data =
1273 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1274 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1275 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1276 const struct gen_device_info *devinfo = &screen->devinfo;
1277 enum brw_param_builtin *system_values = NULL;
1278 unsigned num_system_values = 0;
1279 unsigned num_cbufs = 0;
1280
1281 nir_shader *nir;
1282
1283 struct iris_binding_table bt;
1284
1285 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1286
1287 if (ish) {
1288 nir = nir_shader_clone(mem_ctx, ish->nir);
1289
1290 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1291 &num_system_values, &num_cbufs);
1292 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1293 num_system_values, num_cbufs);
1294 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1295 } else {
1296 nir =
1297 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1298
1299 /* Reserve space for passing the default tess levels as constants. */
1300 num_cbufs = 1;
1301 num_system_values = 8;
1302 system_values =
1303 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1304 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1305 prog_data->nr_params = num_system_values;
1306
1307 if (key->tes_primitive_mode == GL_QUADS) {
1308 for (int i = 0; i < 4; i++)
1309 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1310
1311 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1312 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1313 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1314 for (int i = 0; i < 3; i++)
1315 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1316
1317 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1318 } else {
1319 assert(key->tes_primitive_mode == GL_ISOLINES);
1320 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1321 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1322 }
1323
1324 /* Manually set up the TCS binding table. */
1325 memset(&bt, 0, sizeof(bt));
1326 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1327 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1328 bt.size_bytes = 4;
1329
1330 prog_data->ubo_ranges[0].length = 1;
1331 }
1332
1333 char *error_str = NULL;
1334 const unsigned *program =
1335 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1336 nir, -1, NULL, &error_str);
1337 if (program == NULL) {
1338 dbg_printf("Failed to compile control shader: %s\n", error_str);
1339 ralloc_free(mem_ctx);
1340 return NULL;
1341 }
1342
1343 if (ish) {
1344 if (ish->compiled_once) {
1345 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1346 } else {
1347 ish->compiled_once = true;
1348 }
1349 }
1350
1351 struct iris_compiled_shader *shader =
1352 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1353 prog_data, NULL, system_values, num_system_values,
1354 0, num_cbufs, &bt);
1355
1356 if (ish)
1357 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1358
1359 ralloc_free(mem_ctx);
1360 return shader;
1361 }
1362
1363 /**
1364 * Update the current tessellation control shader variant.
1365 *
1366 * Fill out the key, look in the cache, compile and bind if needed.
1367 */
1368 static void
1369 iris_update_compiled_tcs(struct iris_context *ice)
1370 {
1371 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1372 struct iris_uncompiled_shader *tcs =
1373 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1374 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1375 const struct brw_compiler *compiler = screen->compiler;
1376 const struct gen_device_info *devinfo = &screen->devinfo;
1377
1378 const struct shader_info *tes_info =
1379 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1380 struct iris_tcs_prog_key key = {
1381 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1382 .tes_primitive_mode = tes_info->tess.primitive_mode,
1383 .input_vertices =
1384 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1385 .quads_workaround = devinfo->gen < 9 &&
1386 tes_info->tess.primitive_mode == GL_QUADS &&
1387 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1388 };
1389 get_unified_tess_slots(ice, &key.outputs_written,
1390 &key.patch_outputs_written);
1391 screen->vtbl.populate_tcs_key(ice, &key);
1392
1393 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1394 struct iris_compiled_shader *shader =
1395 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1396
1397 if (tcs && !shader)
1398 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1399
1400 if (!shader)
1401 shader = iris_compile_tcs(ice, tcs, &key);
1402
1403 if (old != shader) {
1404 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1405 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1406 IRIS_STAGE_DIRTY_BINDINGS_TCS |
1407 IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1408 shs->sysvals_need_upload = true;
1409 }
1410 }
1411
1412 /**
1413 * Compile a tessellation evaluation shader, and upload the assembly.
1414 */
1415 static struct iris_compiled_shader *
1416 iris_compile_tes(struct iris_context *ice,
1417 struct iris_uncompiled_shader *ish,
1418 const struct iris_tes_prog_key *key)
1419 {
1420 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1421 const struct brw_compiler *compiler = screen->compiler;
1422 void *mem_ctx = ralloc_context(NULL);
1423 struct brw_tes_prog_data *tes_prog_data =
1424 rzalloc(mem_ctx, struct brw_tes_prog_data);
1425 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1426 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1427 enum brw_param_builtin *system_values;
1428 const struct gen_device_info *devinfo = &screen->devinfo;
1429 unsigned num_system_values;
1430 unsigned num_cbufs;
1431
1432 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1433
1434 if (key->vue.nr_userclip_plane_consts) {
1435 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1436 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1437 true, false, NULL);
1438 nir_lower_io_to_temporaries(nir, impl, true, false);
1439 nir_lower_global_vars_to_local(nir);
1440 nir_lower_vars_to_ssa(nir);
1441 nir_shader_gather_info(nir, impl);
1442 }
1443
1444 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1445 &num_system_values, &num_cbufs);
1446
1447 struct iris_binding_table bt;
1448 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1449 num_system_values, num_cbufs);
1450
1451 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1452
1453 struct brw_vue_map input_vue_map;
1454 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1455 key->patch_inputs_read);
1456
1457 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1458
1459 char *error_str = NULL;
1460 const unsigned *program =
1461 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1462 tes_prog_data, nir, -1, NULL, &error_str);
1463 if (program == NULL) {
1464 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1465 ralloc_free(mem_ctx);
1466 return NULL;
1467 }
1468
1469 if (ish->compiled_once) {
1470 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1471 } else {
1472 ish->compiled_once = true;
1473 }
1474
1475 uint32_t *so_decls =
1476 screen->vtbl.create_so_decl_list(&ish->stream_output,
1477 &vue_prog_data->vue_map);
1478
1479
1480 struct iris_compiled_shader *shader =
1481 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1482 prog_data, so_decls, system_values, num_system_values,
1483 0, num_cbufs, &bt);
1484
1485 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1486
1487 ralloc_free(mem_ctx);
1488 return shader;
1489 }
1490
1491 /**
1492 * Update the current tessellation evaluation shader variant.
1493 *
1494 * Fill out the key, look in the cache, compile and bind if needed.
1495 */
1496 static void
1497 iris_update_compiled_tes(struct iris_context *ice)
1498 {
1499 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1500 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1501 struct iris_uncompiled_shader *ish =
1502 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1503
1504 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1505 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1506 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1507
1508 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1509 struct iris_compiled_shader *shader =
1510 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1511
1512 if (!shader)
1513 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1514
1515 if (!shader)
1516 shader = iris_compile_tes(ice, ish, &key);
1517
1518 if (old != shader) {
1519 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1520 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1521 IRIS_STAGE_DIRTY_BINDINGS_TES |
1522 IRIS_STAGE_DIRTY_CONSTANTS_TES;
1523 shs->sysvals_need_upload = true;
1524 }
1525
1526 /* TODO: Could compare and avoid flagging this. */
1527 const struct shader_info *tes_info = &ish->nir->info;
1528 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1529 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1530 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1531 }
1532 }
1533
1534 /**
1535 * Compile a geometry shader, and upload the assembly.
1536 */
1537 static struct iris_compiled_shader *
1538 iris_compile_gs(struct iris_context *ice,
1539 struct iris_uncompiled_shader *ish,
1540 const struct iris_gs_prog_key *key)
1541 {
1542 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1543 const struct brw_compiler *compiler = screen->compiler;
1544 const struct gen_device_info *devinfo = &screen->devinfo;
1545 void *mem_ctx = ralloc_context(NULL);
1546 struct brw_gs_prog_data *gs_prog_data =
1547 rzalloc(mem_ctx, struct brw_gs_prog_data);
1548 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1549 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1550 enum brw_param_builtin *system_values;
1551 unsigned num_system_values;
1552 unsigned num_cbufs;
1553
1554 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1555
1556 if (key->vue.nr_userclip_plane_consts) {
1557 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1558 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1559 false, NULL);
1560 nir_lower_io_to_temporaries(nir, impl, true, false);
1561 nir_lower_global_vars_to_local(nir);
1562 nir_lower_vars_to_ssa(nir);
1563 nir_shader_gather_info(nir, impl);
1564 }
1565
1566 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1567 &num_system_values, &num_cbufs);
1568
1569 struct iris_binding_table bt;
1570 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1571 num_system_values, num_cbufs);
1572
1573 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1574
1575 brw_compute_vue_map(devinfo,
1576 &vue_prog_data->vue_map, nir->info.outputs_written,
1577 nir->info.separate_shader, /* pos_slots */ 1);
1578
1579 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1580
1581 char *error_str = NULL;
1582 const unsigned *program =
1583 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1584 nir, NULL, -1, NULL, &error_str);
1585 if (program == NULL) {
1586 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1587 ralloc_free(mem_ctx);
1588 return NULL;
1589 }
1590
1591 if (ish->compiled_once) {
1592 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1593 } else {
1594 ish->compiled_once = true;
1595 }
1596
1597 uint32_t *so_decls =
1598 screen->vtbl.create_so_decl_list(&ish->stream_output,
1599 &vue_prog_data->vue_map);
1600
1601 struct iris_compiled_shader *shader =
1602 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1603 prog_data, so_decls, system_values, num_system_values,
1604 0, num_cbufs, &bt);
1605
1606 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1607
1608 ralloc_free(mem_ctx);
1609 return shader;
1610 }
1611
1612 /**
1613 * Update the current geometry shader variant.
1614 *
1615 * Fill out the key, look in the cache, compile and bind if needed.
1616 */
1617 static void
1618 iris_update_compiled_gs(struct iris_context *ice)
1619 {
1620 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1621 struct iris_uncompiled_shader *ish =
1622 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1623 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1624 struct iris_compiled_shader *shader = NULL;
1625 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1626
1627 if (ish) {
1628 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1629 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1630
1631 shader =
1632 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1633
1634 if (!shader)
1635 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1636
1637 if (!shader)
1638 shader = iris_compile_gs(ice, ish, &key);
1639 }
1640
1641 if (old != shader) {
1642 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1643 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1644 IRIS_STAGE_DIRTY_BINDINGS_GS |
1645 IRIS_STAGE_DIRTY_CONSTANTS_GS;
1646 shs->sysvals_need_upload = true;
1647 }
1648 }
1649
1650 /**
1651 * Compile a fragment (pixel) shader, and upload the assembly.
1652 */
1653 static struct iris_compiled_shader *
1654 iris_compile_fs(struct iris_context *ice,
1655 struct iris_uncompiled_shader *ish,
1656 const struct iris_fs_prog_key *key,
1657 struct brw_vue_map *vue_map)
1658 {
1659 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1660 const struct brw_compiler *compiler = screen->compiler;
1661 void *mem_ctx = ralloc_context(NULL);
1662 struct brw_wm_prog_data *fs_prog_data =
1663 rzalloc(mem_ctx, struct brw_wm_prog_data);
1664 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1665 enum brw_param_builtin *system_values;
1666 const struct gen_device_info *devinfo = &screen->devinfo;
1667 unsigned num_system_values;
1668 unsigned num_cbufs;
1669
1670 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1671
1672 prog_data->use_alt_mode = ish->use_alt_mode;
1673
1674 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1675 &num_system_values, &num_cbufs);
1676
1677 /* Lower output variables to load_output intrinsics before setting up
1678 * binding tables, so iris_setup_binding_table can map any load_output
1679 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1680 * non-coherent framebuffer fetches.
1681 */
1682 brw_nir_lower_fs_outputs(nir);
1683
1684 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1685 * and do not need a binding table entry with a null surface. Earlier
1686 * generations need an entry for a null surface.
1687 */
1688 int null_rts = devinfo->gen < 11 ? 1 : 0;
1689
1690 struct iris_binding_table bt;
1691 iris_setup_binding_table(devinfo, nir, &bt,
1692 MAX2(key->nr_color_regions, null_rts),
1693 num_system_values, num_cbufs);
1694
1695 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1696
1697 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1698
1699 char *error_str = NULL;
1700 const unsigned *program =
1701 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1702 nir, -1, -1, -1, true, false, vue_map,
1703 NULL, &error_str);
1704 if (program == NULL) {
1705 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1706 ralloc_free(mem_ctx);
1707 return NULL;
1708 }
1709
1710 if (ish->compiled_once) {
1711 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1712 } else {
1713 ish->compiled_once = true;
1714 }
1715
1716 struct iris_compiled_shader *shader =
1717 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1718 prog_data, NULL, system_values, num_system_values,
1719 0, num_cbufs, &bt);
1720
1721 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1722
1723 ralloc_free(mem_ctx);
1724 return shader;
1725 }
1726
1727 /**
1728 * Update the current fragment shader variant.
1729 *
1730 * Fill out the key, look in the cache, compile and bind if needed.
1731 */
1732 static void
1733 iris_update_compiled_fs(struct iris_context *ice)
1734 {
1735 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1736 struct iris_uncompiled_shader *ish =
1737 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1738 struct iris_fs_prog_key key = { KEY_ID(base) };
1739 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1740 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1741
1742 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1743 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1744
1745 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1746 struct iris_compiled_shader *shader =
1747 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1748
1749 if (!shader)
1750 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1751
1752 if (!shader)
1753 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1754
1755 if (old != shader) {
1756 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1757 // toggles. might be able to avoid flagging SBE too.
1758 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1759 ice->state.dirty |= IRIS_DIRTY_WM |
1760 IRIS_DIRTY_CLIP |
1761 IRIS_DIRTY_SBE;
1762 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1763 IRIS_STAGE_DIRTY_BINDINGS_FS |
1764 IRIS_STAGE_DIRTY_CONSTANTS_FS;
1765 shs->sysvals_need_upload = true;
1766 }
1767 }
1768
1769 /**
1770 * Update the last enabled stage's VUE map.
1771 *
1772 * When the shader feeding the rasterizer's output interface changes, we
1773 * need to re-emit various packets.
1774 */
1775 static void
1776 update_last_vue_map(struct iris_context *ice,
1777 struct brw_stage_prog_data *prog_data)
1778 {
1779 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1780 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1781 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1782 const uint64_t changed_slots =
1783 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1784
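/* If a viewport index output appeared or disappeared, the number of viewports we program changes, and viewport/scissor state must be re-emitted. */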
1785 if (changed_slots & VARYING_BIT_VIEWPORT) {
1786 ice->state.num_viewports =
1787 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1788 ice->state.dirty |= IRIS_DIRTY_CLIP |
1789 IRIS_DIRTY_SF_CL_VIEWPORT |
1790 IRIS_DIRTY_CC_VIEWPORT |
1791 IRIS_DIRTY_SCISSOR_RECT;
1792 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1793 ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1794 }
1795
1796 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1797 ice->state.dirty |= IRIS_DIRTY_SBE;
1798 }
1799
1800 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1801 }
1802
1803 static void
1804 iris_update_pull_constant_descriptors(struct iris_context *ice,
1805 gl_shader_stage stage)
1806 {
1807 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1808
1809 if (!shader || !shader->prog_data->has_ubo_pull)
1810 return;
1811
1812 struct iris_shader_state *shs = &ice->state.shaders[stage];
1813 bool any_new_descriptors =
1814 shader->num_system_values > 0 && shs->sysvals_need_upload;
1815
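/* Build surface states for any bound constant buffers that don't have one yet; pull-constant (UBO) loads read through these. */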
1816 unsigned bound_cbufs = shs->bound_cbufs;
1817
1818 while (bound_cbufs) {
1819 const int i = u_bit_scan(&bound_cbufs);
1820 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1821 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1822 if (!surf_state->res && cbuf->buffer) {
1823 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1824 any_new_descriptors = true;
1825 }
1826 }
1827
1828 if (any_new_descriptors)
1829 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1830 }
1831
1832 /**
1833 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1834 */
1835 static struct brw_vue_prog_data *
1836 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1837 {
1838 if (!ice->shaders.prog[stage])
1839 return NULL;
1840
1841 return (void *) ice->shaders.prog[stage]->prog_data;
1842 }
1843
1844 /**
1845 * Update the current shader variants for the given state.
1846 *
1847 * This should be called on every draw call to ensure that the correct
1848 * shaders are bound. It will also flag any dirty state triggered by
1849 * swapping out those shaders.
1850 */
1851 void
1852 iris_update_compiled_shaders(struct iris_context *ice)
1853 {
1854 const uint64_t dirty = ice->state.dirty;
1855 const uint64_t stage_dirty = ice->state.stage_dirty;
1856
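/* Snapshot the current VUE prog_data so we can detect URB entry size changes after updating the shaders below; skip it if the URB is already dirty. */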
1857 struct brw_vue_prog_data *old_prog_datas[4];
1858 if (!(dirty & IRIS_DIRTY_URB)) {
1859 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1860 old_prog_datas[i] = get_vue_prog_data(ice, i);
1861 }
1862
1863 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1864 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1865 struct iris_uncompiled_shader *tes =
1866 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1867 if (tes) {
1868 iris_update_compiled_tcs(ice);
1869 iris_update_compiled_tes(ice);
1870 } else {
1871 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1872 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1873 ice->state.stage_dirty |=
1874 IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1875 IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1876 IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1877 }
1878 }
1879
1880 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1881 iris_update_compiled_vs(ice);
1882 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1883 iris_update_compiled_gs(ice);
1884
1885 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1886 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1887 const struct iris_compiled_shader *gs =
1888 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1889 const struct iris_compiled_shader *tes =
1890 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1891
1892 bool points_or_lines = false;
1893
1894 if (gs) {
1895 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1896 points_or_lines =
1897 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1898 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1899 } else if (tes) {
1900 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1901 points_or_lines =
1902 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1903 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1904 }
1905
1906 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1907 /* Outbound to XY Clip enables */
1908 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1909 ice->state.dirty |= IRIS_DIRTY_CLIP;
1910 }
1911 }
1912
1913 gl_shader_stage last_stage = last_vue_stage(ice);
1914 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1915 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1916 update_last_vue_map(ice, shader->prog_data);
1917 if (ice->state.streamout != shader->streamout) {
1918 ice->state.streamout = shader->streamout;
1919 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1920 }
1921
1922 if (ice->state.streamout_active) {
1923 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1924 struct iris_stream_output_target *so =
1925 (void *) ice->state.so_target[i];
1926 if (so)
1927 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1928 }
1929 }
1930
1931 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
1932 iris_update_compiled_fs(ice);
1933
1934 /* Changing shader interfaces may require a URB configuration. */
1935 if (!(dirty & IRIS_DIRTY_URB)) {
1936 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1937 struct brw_vue_prog_data *old = old_prog_datas[i];
1938 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1939 if (!!old != !!new ||
1940 (new && new->urb_entry_size != old->urb_entry_size)) {
1941 ice->state.dirty |= IRIS_DIRTY_URB;
1942 break;
1943 }
1944 }
1945 }
1946
1947 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1948 if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
1949 iris_update_pull_constant_descriptors(ice, i);
1950 }
1951 }
1952
1953 static struct iris_compiled_shader *
1954 iris_compile_cs(struct iris_context *ice,
1955 struct iris_uncompiled_shader *ish,
1956 const struct iris_cs_prog_key *key)
1957 {
1958 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1959 const struct brw_compiler *compiler = screen->compiler;
1960 void *mem_ctx = ralloc_context(NULL);
1961 struct brw_cs_prog_data *cs_prog_data =
1962 rzalloc(mem_ctx, struct brw_cs_prog_data);
1963 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1964 enum brw_param_builtin *system_values;
1965 const struct gen_device_info *devinfo = &screen->devinfo;
1966 unsigned num_system_values;
1967 unsigned num_cbufs;
1968
1969 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1970
1971 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1972
1973 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data,
1974 ish->kernel_input_size,
1975 &system_values, &num_system_values, &num_cbufs);
1976
1977 struct iris_binding_table bt;
1978 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1979 num_system_values, num_cbufs);
1980
1981 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1982
1983 char *error_str = NULL;
1984 const unsigned *program =
1985 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1986 nir, -1, NULL, &error_str);
1987 if (program == NULL) {
1988 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1989 ralloc_free(mem_ctx);
1990 return NULL;
1991 }
1992
1993 if (ish->compiled_once) {
1994 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1995 } else {
1996 ish->compiled_once = true;
1997 }
1998
1999 struct iris_compiled_shader *shader =
2000 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
2001 prog_data, NULL, system_values, num_system_values,
2002 ish->kernel_input_size, num_cbufs, &bt);
2003
2004 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
2005
2006 ralloc_free(mem_ctx);
2007 return shader;
2008 }
2009
2010 static void
2011 iris_update_compiled_cs(struct iris_context *ice)
2012 {
2013 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2014 struct iris_uncompiled_shader *ish =
2015 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2016
2017 struct iris_cs_prog_key key = { KEY_ID(base) };
2018 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2019 screen->vtbl.populate_cs_key(ice, &key);
2020
2021 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2022 struct iris_compiled_shader *shader =
2023 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2024
2025 if (!shader)
2026 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2027
2028 if (!shader)
2029 shader = iris_compile_cs(ice, ish, &key);
2030
2031 if (old != shader) {
2032 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2033 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2034 IRIS_STAGE_DIRTY_BINDINGS_CS |
2035 IRIS_STAGE_DIRTY_CONSTANTS_CS;
2036 shs->sysvals_need_upload = true;
2037 }
2038 }
2039
2040 void
2041 iris_update_compiled_compute_shader(struct iris_context *ice)
2042 {
2043 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2044 iris_update_compiled_cs(ice);
2045
2046 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2047 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2048 }
2049
2050 void
2051 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2052 unsigned threads,
2053 uint32_t *dst)
2054 {
2055 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2056 assert(cs_prog_data->push.cross_thread.size == 0);
2057 assert(cs_prog_data->push.per_thread.dwords == 1);
2058 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
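/* Per-thread push constants are padded to a full 8-DWord register; the only meaningful DWord here is the subgroup ID, hence the stride of 8. */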
2059 for (unsigned t = 0; t < threads; t++)
2060 dst[8 * t] = t;
2061 }
2062
2063 /**
2064 * Allocate scratch BOs as needed for the given per-thread size and stage.
2065 */
2066 struct iris_bo *
2067 iris_get_scratch_space(struct iris_context *ice,
2068 unsigned per_thread_scratch,
2069 gl_shader_stage stage)
2070 {
2071 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2072 struct iris_bufmgr *bufmgr = screen->bufmgr;
2073 const struct gen_device_info *devinfo = &screen->devinfo;
2074
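/* per_thread_scratch is a power of two (at least 1KB), so ffs() - 11 maps 1KB -> 0, 2KB -> 1, and so on: a compact index into scratch_bos[]. */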
2075 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2076 assert(encoded_size < (1 << 16));
2077
2078 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2079
2080 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2081 *
2082 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2083 * must allocate scratch space enough so that each slice has 4
2084 * slices allowed."
2085 *
2086 * According to the other driver team, this applies to compute shaders
2087 * as well. This is not currently documented at all.
2088 *
2089 * This hack is no longer necessary on Gen11+.
2090 *
2091 * For Gen11+, scratch space allocation is based on the number of threads
2092 * in the base configuration.
2093 */
2094 unsigned subslice_total = screen->subslice_total;
2095 if (devinfo->gen >= 12)
2096 subslice_total = devinfo->num_subslices[0];
2097 else if (devinfo->gen == 11)
2098 subslice_total = 8;
2099 else if (devinfo->gen < 11)
2100 subslice_total = 4 * devinfo->num_slices;
2101 assert(subslice_total >= screen->subslice_total);
2102
2103 if (!*bop) {
2104 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2105
2106 if (devinfo->gen >= 12) {
2107 /* Same as ICL below, but with 16 EUs. */
2108 scratch_ids_per_subslice = 16 * 8;
2109 } else if (devinfo->gen == 11) {
2110 /* The MEDIA_VFE_STATE docs say:
2111 *
2112 * "Starting with this configuration, the Maximum Number of
2113 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2114 *
2115 * Although there are only 7 threads per EU in the configuration,
2116 * the FFTID is calculated as if there are 8 threads per EU,
2117 * which in turn requires a larger amount of Scratch Space to be
2118 * allocated by the driver."
2119 */
2120 scratch_ids_per_subslice = 8 * 8;
2121 }
2122
2123 uint32_t max_threads[] = {
2124 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2125 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2126 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2127 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2128 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2129 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2130 };
2131
2132 uint32_t size = per_thread_scratch * max_threads[stage];
2133
2134 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2135 }
2136
2137 return *bop;
2138 }
2139
2140 /* ------------------------------------------------------------------- */
2141
2142 /**
2143 * The pipe->create_[stage]_state() driver hooks.
2144 *
2145 * Performs basic NIR preprocessing, records any state dependencies, and
2146 * returns an iris_uncompiled_shader as the Gallium CSO.
2147 *
2148 * Actual shader compilation to assembly happens later, at first use.
2149 */
2150 static void *
2151 iris_create_uncompiled_shader(struct pipe_context *ctx,
2152 nir_shader *nir,
2153 const struct pipe_stream_output_info *so_info)
2154 {
2155 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2156 const struct gen_device_info *devinfo = &screen->devinfo;
2157
2158 struct iris_uncompiled_shader *ish =
2159 calloc(1, sizeof(struct iris_uncompiled_shader));
2160 if (!ish)
2161 return NULL;
2162
2163 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2164
2165 brw_preprocess_nir(screen->compiler, nir, NULL);
2166
2167 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2168 &ish->uses_atomic_load_store);
2169 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2170
2171 nir_sweep(nir);
2172
2173 ish->program_id = get_new_program_id(screen);
2174 ish->nir = nir;
2175 if (so_info) {
2176 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2177 update_so_info(&ish->stream_output, nir->info.outputs_written);
2178 }
2179
2180 /* Save this now before potentially dropping nir->info.name */
2181 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2182 ish->use_alt_mode = true;
2183
2184 if (screen->disk_cache) {
2185 /* Serialize the NIR to a binary blob that we can hash for the disk
2186 * cache. Drop unnecessary information (like variable names)
2187 * so the serialized NIR is smaller, and also to let us detect more
2188 * isomorphic shaders when hashing, increasing cache hits.
2189 */
2190 struct blob blob;
2191 blob_init(&blob);
2192 nir_serialize(&blob, nir, true);
2193 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2194 blob_finish(&blob);
2195 }
2196
2197 return ish;
2198 }
2199
2200 static struct iris_uncompiled_shader *
2201 iris_create_shader_state(struct pipe_context *ctx,
2202 const struct pipe_shader_state *state)
2203 {
2204 struct nir_shader *nir;
2205
2206 if (state->type == PIPE_SHADER_IR_TGSI)
2207 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2208 else
2209 nir = state->ir.nir;
2210
2211 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2212 }
2213
2214 static void *
2215 iris_create_vs_state(struct pipe_context *ctx,
2216 const struct pipe_shader_state *state)
2217 {
2218 struct iris_context *ice = (void *) ctx;
2219 struct iris_screen *screen = (void *) ctx->screen;
2220 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2221
2222 /* User clip planes */
2223 if (ish->nir->info.clip_distance_array_size == 0)
2224 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2225
2226 if (screen->precompile) {
2227 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2228
2229 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2230 iris_compile_vs(ice, ish, &key);
2231 }
2232
2233 return ish;
2234 }
2235
2236 static void *
2237 iris_create_tcs_state(struct pipe_context *ctx,
2238 const struct pipe_shader_state *state)
2239 {
2240 struct iris_context *ice = (void *) ctx;
2241 struct iris_screen *screen = (void *) ctx->screen;
2242 const struct brw_compiler *compiler = screen->compiler;
2243 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2244 struct shader_info *info = &ish->nir->info;
2245
2246 if (screen->precompile) {
2247 const unsigned _GL_TRIANGLES = 0x0004;
2248 struct iris_tcs_prog_key key = {
2249 KEY_ID(vue.base),
2250 // XXX: make sure the linker fills this out from the TES...
2251 .tes_primitive_mode =
2252 info->tess.primitive_mode ? info->tess.primitive_mode
2253 : _GL_TRIANGLES,
2254 .outputs_written = info->outputs_written,
2255 .patch_outputs_written = info->patch_outputs_written,
2256 };
2257
2258 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2259 * We don't have that information, so we randomly guess that the input
2260 * and output patches are the same size. This is a bad guess, but we
2261 * can't do much better.
2262 */
2263 if (compiler->use_tcs_8_patch)
2264 key.input_vertices = info->tess.tcs_vertices_out;
2265
2266 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2267 iris_compile_tcs(ice, ish, &key);
2268 }
2269
2270 return ish;
2271 }
2272
2273 static void *
2274 iris_create_tes_state(struct pipe_context *ctx,
2275 const struct pipe_shader_state *state)
2276 {
2277 struct iris_context *ice = (void *) ctx;
2278 struct iris_screen *screen = (void *) ctx->screen;
2279 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2280 struct shader_info *info = &ish->nir->info;
2281
2282 /* User clip planes */
2283 if (ish->nir->info.clip_distance_array_size == 0)
2284 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2285
2286 if (screen->precompile) {
2287 struct iris_tes_prog_key key = {
2288 KEY_ID(vue.base),
2289 // XXX: not ideal, need TCS output/TES input unification
2290 .inputs_read = info->inputs_read,
2291 .patch_inputs_read = info->patch_inputs_read,
2292 };
2293
2294 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2295 iris_compile_tes(ice, ish, &key);
2296 }
2297
2298 return ish;
2299 }
2300
2301 static void *
2302 iris_create_gs_state(struct pipe_context *ctx,
2303 const struct pipe_shader_state *state)
2304 {
2305 struct iris_context *ice = (void *) ctx;
2306 struct iris_screen *screen = (void *) ctx->screen;
2307 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2308
2309 /* User clip planes */
2310 if (ish->nir->info.clip_distance_array_size == 0)
2311 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2312
2313 if (screen->precompile) {
2314 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2315
2316 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2317 iris_compile_gs(ice, ish, &key);
2318 }
2319
2320 return ish;
2321 }
2322
2323 static void *
2324 iris_create_fs_state(struct pipe_context *ctx,
2325 const struct pipe_shader_state *state)
2326 {
2327 struct iris_context *ice = (void *) ctx;
2328 struct iris_screen *screen = (void *) ctx->screen;
2329 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2330 struct shader_info *info = &ish->nir->info;
2331
2332 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2333 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2334 (1ull << IRIS_NOS_RASTERIZER) |
2335 (1ull << IRIS_NOS_BLEND);
2336
2337 /* The program key needs the VUE map if there are > 16 inputs */
2338 if (util_bitcount64(ish->nir->info.inputs_read &
2339 BRW_FS_VARYING_INPUT_MASK) > 16) {
2340 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2341 }
2342
2343 if (screen->precompile) {
2344 const uint64_t color_outputs = info->outputs_written &
2345 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2346 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2347 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2348
2349 bool can_rearrange_varyings =
2350 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2351
2352 const struct gen_device_info *devinfo = &screen->devinfo;
2353 struct iris_fs_prog_key key = {
2354 KEY_ID(base),
2355 .nr_color_regions = util_bitcount(color_outputs),
2356 .coherent_fb_fetch = devinfo->gen >= 9,
2357 .input_slots_valid =
2358 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2359 };
2360
2361 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2362 iris_compile_fs(ice, ish, &key, NULL);
2363 }
2364
2365 return ish;
2366 }
2367
2368 static void *
2369 iris_create_compute_state(struct pipe_context *ctx,
2370 const struct pipe_compute_state *state)
2371 {
2372 struct iris_context *ice = (void *) ctx;
2373 struct iris_screen *screen = (void *) ctx->screen;
2374 const nir_shader_compiler_options *options =
2375 screen->compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions;
2376
2377 nir_shader *nir;
2378 switch (state->ir_type) {
2379 case PIPE_SHADER_IR_NIR:
2380 nir = (void *)state->prog;
2381 break;
2382
2383 case PIPE_SHADER_IR_NIR_SERIALIZED: {
2384 struct blob_reader reader;
2385 const struct pipe_binary_program_header *hdr = state->prog;
2386 blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
2387 nir = nir_deserialize(NULL, options, &reader);
2388 break;
2389 }
2390
2391 default:
2392 unreachable("Unsupported IR");
2393 }
2394
2395 /* Most of iris doesn't really care about the difference between compute
2396 * shaders and kernels. We also tend to hard-code COMPUTE everywhere so
2397 * it's way easier if we just normalize to COMPUTE here.
2398 */
2399 assert(nir->info.stage == MESA_SHADER_COMPUTE ||
2400 nir->info.stage == MESA_SHADER_KERNEL);
2401 nir->info.stage = MESA_SHADER_COMPUTE;
2402
2403 struct iris_uncompiled_shader *ish =
2404 iris_create_uncompiled_shader(ctx, nir, NULL);
2405 ish->kernel_input_size = state->req_input_mem;
2406
2407 // XXX: disallow more than 64KB of shared variables
2408
2409 if (screen->precompile) {
2410 struct iris_cs_prog_key key = { KEY_ID(base) };
2411
2412 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2413 iris_compile_cs(ice, ish, &key);
2414 }
2415
2416 return ish;
2417 }
2418
2419 /**
2420 * The pipe->delete_[stage]_state() driver hooks.
2421 *
2422 * Frees the iris_uncompiled_shader.
2423 */
2424 static void
2425 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2426 {
2427 struct iris_uncompiled_shader *ish = state;
2428 struct iris_context *ice = (void *) ctx;
2429
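/* If the shader being deleted is still bound, unbind it and flag the stage so the next draw picks up the change. */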
2430 if (ice->shaders.uncompiled[stage] == ish) {
2431 ice->shaders.uncompiled[stage] = NULL;
2432 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2433 }
2434
2435 iris_delete_shader_variants(ice, ish);
2436
2437 ralloc_free(ish->nir);
2438 free(ish);
2439 }
2440
2441 static void
2442 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2443 {
2444 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2445 }
2446
2447 static void
2448 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2449 {
2450 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2451 }
2452
2453 static void
2454 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2455 {
2456 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2457 }
2458
2459 static void
2460 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2461 {
2462 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2463 }
2464
2465 static void
2466 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2467 {
2468 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2469 }
2470
2471 static void
2472 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2473 {
2474 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2475 }
2476
2477 /**
2478 * The pipe->bind_[stage]_state() driver hook.
2479 *
2480 * Binds an uncompiled shader as the current one for a particular stage.
2481 * Updates dirty tracking to account for the shader's NOS.
2482 */
2483 static void
2484 bind_shader_state(struct iris_context *ice,
2485 struct iris_uncompiled_shader *ish,
2486 gl_shader_stage stage)
2487 {
2488 uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2489 const uint64_t nos = ish ? ish->nos : 0;
2490
2491 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2492 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2493
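/* If the number of textures used changes, the set of sampler states that needs emitting changes too, so flag them for re-upload. */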
2494 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2495 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2496 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2497 }
2498
2499 ice->shaders.uncompiled[stage] = ish;
2500 ice->state.stage_dirty |= stage_dirty_bit;
2501
2502 /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they change
2503 * (or that they no longer need to do so).
2504 */
2505 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2506 if (nos & (1 << i))
2507 ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2508 else
2509 ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2510 }
2511 }
2512
2513 static void
2514 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2515 {
2516 struct iris_context *ice = (struct iris_context *)ctx;
2517 struct iris_uncompiled_shader *new_ish = state;
2518
2519 if (new_ish &&
2520 ice->state.window_space_position !=
2521 new_ish->nir->info.vs.window_space_position) {
2522 ice->state.window_space_position =
2523 new_ish->nir->info.vs.window_space_position;
2524
2525 ice->state.dirty |= IRIS_DIRTY_CLIP |
2526 IRIS_DIRTY_RASTER |
2527 IRIS_DIRTY_CC_VIEWPORT;
2528 }
2529
2530 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2531 }
2532
2533 static void
2534 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2535 {
2536 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2537 }
2538
2539 static void
2540 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2541 {
2542 struct iris_context *ice = (struct iris_context *)ctx;
2543
2544 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2545 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2546 ice->state.dirty |= IRIS_DIRTY_URB;
2547
2548 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2549 }
2550
2551 static void
2552 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2553 {
2554 struct iris_context *ice = (struct iris_context *)ctx;
2555
2556 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2557 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2558 ice->state.dirty |= IRIS_DIRTY_URB;
2559
2560 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2561 }
2562
2563 static void
2564 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2565 {
2566 struct iris_context *ice = (struct iris_context *) ctx;
2567 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2568 const struct gen_device_info *devinfo = &screen->devinfo;
2569 struct iris_uncompiled_shader *old_ish =
2570 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2571 struct iris_uncompiled_shader *new_ish = state;
2572
2573 const unsigned color_bits =
2574 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2575 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2576
2577 /* Fragment shader outputs influence HasWriteableRT */
2578 if (!old_ish || !new_ish ||
2579 (old_ish->nir->info.outputs_written & color_bits) !=
2580 (new_ish->nir->info.outputs_written & color_bits))
2581 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2582
2583 if (devinfo->gen == 8)
2584 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2585
2586 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2587 }
2588
2589 static void
2590 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2591 {
2592 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2593 }
2594
2595 void
2596 iris_init_program_functions(struct pipe_context *ctx)
2597 {
2598 ctx->create_vs_state = iris_create_vs_state;
2599 ctx->create_tcs_state = iris_create_tcs_state;
2600 ctx->create_tes_state = iris_create_tes_state;
2601 ctx->create_gs_state = iris_create_gs_state;
2602 ctx->create_fs_state = iris_create_fs_state;
2603 ctx->create_compute_state = iris_create_compute_state;
2604
2605 ctx->delete_vs_state = iris_delete_vs_state;
2606 ctx->delete_tcs_state = iris_delete_tcs_state;
2607 ctx->delete_tes_state = iris_delete_tes_state;
2608 ctx->delete_gs_state = iris_delete_gs_state;
2609 ctx->delete_fs_state = iris_delete_fs_state;
2610 ctx->delete_compute_state = iris_delete_cs_state;
2611
2612 ctx->bind_vs_state = iris_bind_vs_state;
2613 ctx->bind_tcs_state = iris_bind_tcs_state;
2614 ctx->bind_tes_state = iris_bind_tes_state;
2615 ctx->bind_gs_state = iris_bind_gs_state;
2616 ctx->bind_fs_state = iris_bind_fs_state;
2617 ctx->bind_compute_state = iris_bind_cs_state;
2618 }