iris: Upload kernel inputs with system values
[mesa.git] / src / gallium / drivers / iris / iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
56
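/**
 * Allocate a fresh, screen-unique program ID for an uncompiled shader.
 */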
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
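/**
 * Translate an iris_vs_prog_key into the brw_vs_prog_key expected by the
 * backend compiler.  (The iris_to_brw_*_key helpers below do the same for
 * the other shader stages.)
 */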
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants; we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 .ignore_sample_mask_out = !key->multisample_fbo,
129 };
130 }
131
132 static struct brw_cs_prog_key
133 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
134 const struct iris_cs_prog_key *key)
135 {
136 return (struct brw_cs_prog_key) {
137 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
138 };
139 }
140
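/**
 * Allocate a chunk of memory from the given upload manager and return a
 * CPU mapping; the resulting buffer and offset are recorded in @ref.
 */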
141 static void *
142 upload_state(struct u_upload_mgr *uploader,
143 struct iris_state_ref *ref,
144 unsigned size,
145 unsigned alignment)
146 {
147 void *p = NULL;
148 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
149 return p;
150 }
151
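/**
 * Fill out a SURFACE_STATE for a UBO or SSBO buffer range and upload it
 * via the surface state uploader.
 */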
152 void
153 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
154 struct pipe_shader_buffer *buf,
155 struct iris_state_ref *surf_state,
156 bool ssbo)
157 {
158 struct pipe_context *ctx = &ice->ctx;
159 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
160
161 void *map =
162 upload_state(ice->state.surface_uploader, surf_state,
163 screen->isl_dev.ss.size, 64);
164 if (unlikely(!map)) {
165 surf_state->res = NULL;
166 return;
167 }
168
169 struct iris_resource *res = (void *) buf->buffer;
170 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
171 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
172
173 isl_buffer_fill_state(&screen->isl_dev, map,
174 .address = res->bo->gtt_offset + res->offset +
175 buf->buffer_offset,
176 .size_B = buf->buffer_size - res->offset,
177 .format = ssbo ? ISL_FORMAT_RAW
178 : ISL_FORMAT_R32G32B32A32_FLOAT,
179 .swizzle = ISL_SWIZZLE_IDENTITY,
180 .stride_B = 1,
181 .mocs = iris_mocs(res->bo, &screen->isl_dev));
182 }
183
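/**
 * Compute the offset of an array-of-arrays deref chain, in units of
 * @elem_size, clamped to the bounds of the array (see the comment below).
 */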
184 static nir_ssa_def *
185 get_aoa_deref_offset(nir_builder *b,
186 nir_deref_instr *deref,
187 unsigned elem_size)
188 {
189 unsigned array_size = elem_size;
190 nir_ssa_def *offset = nir_imm_int(b, 0);
191
192 while (deref->deref_type != nir_deref_type_var) {
193 assert(deref->deref_type == nir_deref_type_array);
194
195 /* This level's element size is the previous level's array size */
196 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
197 assert(deref->arr.index.ssa);
198 offset = nir_iadd(b, offset,
199 nir_imul(b, index, nir_imm_int(b, array_size)));
200
201 deref = nir_deref_instr_parent(deref);
202 assert(glsl_type_is_array(deref->type));
203 array_size *= glsl_get_length(deref->type);
204 }
205
206 /* Accessing an invalid surface index with the dataport can result in a
207 * hang. According to the spec "if the index used to select an individual
208 * element is negative or greater than or equal to the size of the array,
209 * the results of the operation are undefined but may not lead to
210 * termination" -- which is one of the possible outcomes of the hang.
211 * Clamp the index to prevent access outside of the array bounds.
212 */
213 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
214 }
215
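/**
 * Rewrite storage image deref intrinsics to use flat indices based on
 * var->data.driver_location plus the array-of-arrays offset.
 */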
216 static void
217 iris_lower_storage_image_derefs(nir_shader *nir)
218 {
219 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
220
221 nir_builder b;
222 nir_builder_init(&b, impl);
223
224 nir_foreach_block(block, impl) {
225 nir_foreach_instr_safe(instr, block) {
226 if (instr->type != nir_instr_type_intrinsic)
227 continue;
228
229 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_image_deref_load:
232 case nir_intrinsic_image_deref_store:
233 case nir_intrinsic_image_deref_atomic_add:
234 case nir_intrinsic_image_deref_atomic_imin:
235 case nir_intrinsic_image_deref_atomic_umin:
236 case nir_intrinsic_image_deref_atomic_imax:
237 case nir_intrinsic_image_deref_atomic_umax:
238 case nir_intrinsic_image_deref_atomic_and:
239 case nir_intrinsic_image_deref_atomic_or:
240 case nir_intrinsic_image_deref_atomic_xor:
241 case nir_intrinsic_image_deref_atomic_exchange:
242 case nir_intrinsic_image_deref_atomic_comp_swap:
243 case nir_intrinsic_image_deref_size:
244 case nir_intrinsic_image_deref_samples:
245 case nir_intrinsic_image_deref_load_raw_intel:
246 case nir_intrinsic_image_deref_store_raw_intel: {
247 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
248 nir_variable *var = nir_deref_instr_get_variable(deref);
249
250 b.cursor = nir_before_instr(&intrin->instr);
251 nir_ssa_def *index =
252 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
253 get_aoa_deref_offset(&b, deref, 1));
254 nir_rewrite_image_intrinsic(intrin, index, false);
255 break;
256 }
257
258 default:
259 break;
260 }
261 }
262 }
263 }
264
265 /**
266 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
267 */
268 static bool
269 iris_fix_edge_flags(nir_shader *nir)
270 {
271 if (nir->info.stage != MESA_SHADER_VERTEX) {
272 nir_shader_preserve_all_metadata(nir);
273 return false;
274 }
275
276 nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
277 VARYING_SLOT_EDGE);
278 if (!var) {
279 nir_shader_preserve_all_metadata(nir);
280 return false;
281 }
282
283 var->data.mode = nir_var_shader_temp;
284 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
285 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
286 nir_fixup_deref_modes(nir);
287
288 nir_foreach_function(f, nir) {
289 if (f->impl) {
290 nir_metadata_preserve(f->impl, nir_metadata_block_index |
291 nir_metadata_dominance |
292 nir_metadata_live_ssa_defs |
293 nir_metadata_loop_analysis);
294 } else {
295 nir_metadata_preserve(f->impl, nir_metadata_all);
296 }
297 }
298
299 return true;
300 }
301
302 /**
303 * Fix an uncompiled shader's stream output info.
304 *
305 * Core Gallium stores output->register_index as a "slot" number, where
306 * slots are assigned consecutively to all outputs in info->outputs_written.
307 * This naive packing of outputs doesn't work for us - we too have slots,
308 * but the layout is defined by the VUE map, which we won't have until we
309 * compile a specific shader variant. So, we remap these and simply store
310 * VARYING_SLOT_* in our copy's output->register_index fields.
311 *
312 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
313 * components of our VUE header. See brw_vue_map.c for the layout.
314 */
315 static void
316 update_so_info(struct pipe_stream_output_info *so_info,
317 uint64_t outputs_written)
318 {
319 uint8_t reverse_map[64] = {};
320 unsigned slot = 0;
321 while (outputs_written) {
322 reverse_map[slot++] = u_bit_scan64(&outputs_written);
323 }
324
325 for (unsigned i = 0; i < so_info->num_outputs; i++) {
326 struct pipe_stream_output *output = &so_info->output[i];
327
328 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
329 output->register_index = reverse_map[output->register_index];
330
331 /* The VUE header contains three scalar fields packed together:
332 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
333 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
334 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
335 */
336 switch (output->register_index) {
337 case VARYING_SLOT_LAYER:
338 assert(output->num_components == 1);
339 output->register_index = VARYING_SLOT_PSIZ;
340 output->start_component = 1;
341 break;
342 case VARYING_SLOT_VIEWPORT:
343 assert(output->num_components == 1);
344 output->register_index = VARYING_SLOT_PSIZ;
345 output->start_component = 2;
346 break;
347 case VARYING_SLOT_PSIZ:
348 assert(output->num_components == 1);
349 output->start_component = 3;
350 break;
351 }
352
353 //info->outputs_written |= 1ull << output->register_index;
354 }
355 }
356
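/**
 * Fill a vec4's worth of system value slots with BRW_PARAM_IMAGE entries
 * for one field of brw_image_param, zero-padding any unused components.
 */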
357 static void
358 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
359 unsigned offset, unsigned n)
360 {
361 assert(offset % sizeof(uint32_t) == 0);
362
363 for (unsigned i = 0; i < n; ++i)
364 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
365
366 for (unsigned i = n; i < 4; ++i)
367 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
368 }
369
370 /**
371 * Associate NIR uniform variables with the prog_data->param[] mechanism
372 * used by the backend. Also, decide which UBOs we'd like to push in an
373 * ideal situation (though the backend can reduce this).
374 */
375 static void
376 iris_setup_uniforms(const struct brw_compiler *compiler,
377 void *mem_ctx,
378 nir_shader *nir,
379 struct brw_stage_prog_data *prog_data,
380 unsigned kernel_input_size,
381 enum brw_param_builtin **out_system_values,
382 unsigned *out_num_system_values,
383 unsigned *out_num_cbufs)
384 {
385 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
386
387 unsigned system_values_start = ALIGN(kernel_input_size, sizeof(uint32_t));
388
389 const unsigned IRIS_MAX_SYSTEM_VALUES =
390 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
391 enum brw_param_builtin *system_values =
392 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
393 unsigned num_system_values = 0;
394
395 unsigned patch_vert_idx = -1;
396 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
397 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
398 unsigned variable_group_size_idx = -1;
399 memset(ucp_idx, -1, sizeof(ucp_idx));
400 memset(img_idx, -1, sizeof(img_idx));
401
402 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
403
404 nir_builder b;
405 nir_builder_init(&b, impl);
406
407 b.cursor = nir_before_block(nir_start_block(impl));
408 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
409 nir_ssa_def *temp_const_ubo_name = NULL;
410
411 /* Turn system value intrinsics into uniforms */
412 nir_foreach_block(block, impl) {
413 nir_foreach_instr_safe(instr, block) {
414 if (instr->type != nir_instr_type_intrinsic)
415 continue;
416
417 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
418 nir_ssa_def *offset;
419
420 switch (intrin->intrinsic) {
421 case nir_intrinsic_load_constant: {
422 /* This one is special because it reads from the shader constant
423 * data and not cbuf0 which gallium uploads for us.
424 */
425 b.cursor = nir_before_instr(instr);
426 nir_ssa_def *offset =
427 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
428 nir_intrinsic_base(intrin));
429
430 if (temp_const_ubo_name == NULL)
431 temp_const_ubo_name = nir_imm_int(&b, 0);
432
433 nir_intrinsic_instr *load_ubo =
434 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
435 load_ubo->num_components = intrin->num_components;
436 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
437 load_ubo->src[1] = nir_src_for_ssa(offset);
438 nir_intrinsic_set_align(load_ubo,
439 nir_intrinsic_align_mul(intrin),
440 nir_intrinsic_align_offset(intrin));
441 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
442 intrin->dest.ssa.num_components,
443 intrin->dest.ssa.bit_size,
444 intrin->dest.ssa.name);
445 nir_builder_instr_insert(&b, &load_ubo->instr);
446
447 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
448 nir_src_for_ssa(&load_ubo->dest.ssa));
449 nir_instr_remove(&intrin->instr);
450 continue;
451 }
452 case nir_intrinsic_load_user_clip_plane: {
453 unsigned ucp = nir_intrinsic_ucp_id(intrin);
454
455 if (ucp_idx[ucp] == -1) {
456 ucp_idx[ucp] = num_system_values;
457 num_system_values += 4;
458 }
459
460 for (int i = 0; i < 4; i++) {
461 system_values[ucp_idx[ucp] + i] =
462 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
463 }
464
465 b.cursor = nir_before_instr(instr);
466 offset = nir_imm_int(&b, system_values_start +
467 ucp_idx[ucp] * sizeof(uint32_t));
468 break;
469 }
470 case nir_intrinsic_load_patch_vertices_in:
471 if (patch_vert_idx == -1)
472 patch_vert_idx = num_system_values++;
473
474 system_values[patch_vert_idx] =
475 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
476
477 b.cursor = nir_before_instr(instr);
478 offset = nir_imm_int(&b, system_values_start +
479 patch_vert_idx * sizeof(uint32_t));
480 break;
481 case nir_intrinsic_image_deref_load_param_intel: {
482 assert(devinfo->gen < 9);
483 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
484 nir_variable *var = nir_deref_instr_get_variable(deref);
485
486 if (img_idx[var->data.binding] == -1) {
487 /* GL only allows arrays of arrays of images. */
488 assert(glsl_type_is_image(glsl_without_array(var->type)));
489 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
490
491 for (int i = 0; i < num_images; i++) {
492 const unsigned img = var->data.binding + i;
493
494 img_idx[img] = num_system_values;
495 num_system_values += BRW_IMAGE_PARAM_SIZE;
496
497 uint32_t *img_sv = &system_values[img_idx[img]];
498
499 setup_vec4_image_sysval(
500 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
501 offsetof(struct brw_image_param, offset), 2);
502 setup_vec4_image_sysval(
503 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
504 offsetof(struct brw_image_param, size), 3);
505 setup_vec4_image_sysval(
506 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
507 offsetof(struct brw_image_param, stride), 4);
508 setup_vec4_image_sysval(
509 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
510 offsetof(struct brw_image_param, tiling), 3);
511 setup_vec4_image_sysval(
512 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
513 offsetof(struct brw_image_param, swizzling), 2);
514 }
515 }
516
517 b.cursor = nir_before_instr(instr);
518 offset = nir_iadd(&b,
519 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
520 nir_imm_int(&b, system_values_start +
521 img_idx[var->data.binding] * 4 +
522 nir_intrinsic_base(intrin) * 16));
523 break;
524 }
525 case nir_intrinsic_load_local_group_size: {
526 assert(nir->info.cs.local_size_variable);
527 if (variable_group_size_idx == -1) {
528 variable_group_size_idx = num_system_values;
529 num_system_values += 3;
530 for (int i = 0; i < 3; i++) {
531 system_values[variable_group_size_idx + i] =
532 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
533 }
534 }
535
536 b.cursor = nir_before_instr(instr);
537 offset = nir_imm_int(&b, system_values_start +
538 variable_group_size_idx * sizeof(uint32_t));
539 break;
540 }
541 case nir_intrinsic_load_kernel_input: {
542 assert(nir_intrinsic_base(intrin) +
543 nir_intrinsic_range(intrin) <= kernel_input_size);
544 b.cursor = nir_before_instr(instr);
545 offset = nir_iadd_imm(&b, intrin->src[0].ssa,
546 nir_intrinsic_base(intrin));
547 break;
548 }
549 default:
550 continue;
551 }
552
553 nir_intrinsic_instr *load =
554 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
555 load->num_components = intrin->dest.ssa.num_components;
556 load->src[0] = nir_src_for_ssa(temp_ubo_name);
557 load->src[1] = nir_src_for_ssa(offset);
558 nir_intrinsic_set_align(load, 4, 0);
559 nir_ssa_dest_init(&load->instr, &load->dest,
560 intrin->dest.ssa.num_components,
561 intrin->dest.ssa.bit_size, NULL);
562 nir_builder_instr_insert(&b, &load->instr);
563 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
564 nir_src_for_ssa(&load->dest.ssa));
565 nir_instr_remove(instr);
566 }
567 }
568
569 nir_validate_shader(nir, "before remapping");
570
571 /* Uniforms are stored in constant buffer 0; the user-facing UBOs
572 * start at index one. So if any constant buffer is needed, constant
573 * buffer 0 will also be needed, so account for it.
574 */
575 unsigned num_cbufs = nir->info.num_ubos;
576 if (num_cbufs || nir->num_uniforms)
577 num_cbufs++;
578
579 /* Place the new params in a new cbuf. */
580 if (num_system_values > 0 || kernel_input_size > 0) {
581 unsigned sysval_cbuf_index = num_cbufs;
582 num_cbufs++;
583
584 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
585 num_system_values);
586
587 nir_foreach_block(block, impl) {
588 nir_foreach_instr_safe(instr, block) {
589 if (instr->type != nir_instr_type_intrinsic)
590 continue;
591
592 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
593
594 if (load->intrinsic != nir_intrinsic_load_ubo)
595 continue;
596
597 b.cursor = nir_before_instr(instr);
598
599 assert(load->src[0].is_ssa);
600
601 if (load->src[0].ssa == temp_ubo_name) {
602 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
603 nir_instr_rewrite_src(instr, &load->src[0],
604 nir_src_for_ssa(imm));
605 }
606 }
607 }
608
609 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
610 nir_opt_constant_folding(nir);
611 } else {
612 ralloc_free(system_values);
613 system_values = NULL;
614 }
615
616 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
617 nir_validate_shader(nir, "after remap");
618
619 /* We don't use params[] but gallium leaves num_uniforms set. We use this
620 * to detect when cbuf0 exists but we don't need it anymore when we get
621 * here. Instead, zero it out so that the back-end doesn't get confused
622 * when num_uniforms doesn't match nr_params * 4.
623 */
624 nir->num_uniforms = 0;
625
626 /* Constant loads (if any) need to go at the end of the constant buffers so
627 * we need to know num_cbufs before we can lower to them.
628 */
629 if (temp_const_ubo_name != NULL) {
630 nir_load_const_instr *const_ubo_index =
631 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
632 assert(const_ubo_index->def.bit_size == 32);
633 const_ubo_index->value[0].u32 = num_cbufs;
634 }
635
636 *out_system_values = system_values;
637 *out_num_system_values = num_system_values;
638 *out_num_cbufs = num_cbufs;
639 }
640
641 static const char *surface_group_names[] = {
642 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
643 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
644 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
645 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
646 [IRIS_SURFACE_GROUP_UBO] = "ubo",
647 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
648 [IRIS_SURFACE_GROUP_IMAGE] = "image",
649 };
650
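/**
 * Print a human-readable summary of a binding table; used when binding
 * table debugging (DEBUG_BT) is enabled.
 */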
651 static void
652 iris_print_binding_table(FILE *fp, const char *name,
653 const struct iris_binding_table *bt)
654 {
655 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
656
657 uint32_t total = 0;
658 uint32_t compacted = 0;
659
660 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
661 uint32_t size = bt->sizes[i];
662 total += size;
663 if (size)
664 compacted += util_bitcount64(bt->used_mask[i]);
665 }
666
667 if (total == 0) {
668 fprintf(fp, "Binding table for %s is empty\n\n", name);
669 return;
670 }
671
672 if (total != compacted) {
673 fprintf(fp, "Binding table for %s "
674 "(compacted to %u entries from %u entries)\n",
675 name, compacted, total);
676 } else {
677 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
678 }
679
680 uint32_t entry = 0;
681 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
682 uint64_t mask = bt->used_mask[i];
683 while (mask) {
684 int index = u_bit_scan64(&mask);
685 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
686 }
687 }
688 fprintf(fp, "\n");
689 }
690
691 enum {
692 /* Max elements in a surface group. */
693 SURFACE_GROUP_MAX_ELEMENTS = 64,
694 };
695
696 /**
697 * Map a <group, index> pair to a binding table index.
698 *
699 * For example: <UBO, 5> => binding table index 12
700 */
701 uint32_t
702 iris_group_index_to_bti(const struct iris_binding_table *bt,
703 enum iris_surface_group group, uint32_t index)
704 {
705 assert(index < bt->sizes[group]);
706 uint64_t mask = bt->used_mask[group];
707 uint64_t bit = 1ull << index;
708 if (bit & mask) {
709 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
710 } else {
711 return IRIS_SURFACE_NOT_USED;
712 }
713 }
714
715 /**
716 * Map a binding table index back to a <group, index> pair.
717 *
718 * For example: binding table index 12 => <UBO, 5>
719 */
720 uint32_t
721 iris_bti_to_group_index(const struct iris_binding_table *bt,
722 enum iris_surface_group group, uint32_t bti)
723 {
724 uint64_t used_mask = bt->used_mask[group];
725 assert(bti >= bt->offsets[group]);
726
727 uint32_t c = bti - bt->offsets[group];
728 while (used_mask) {
729 int i = u_bit_scan64(&used_mask);
730 if (c == 0)
731 return i;
732 c--;
733 }
734
735 return IRIS_SURFACE_NOT_USED;
736 }
737
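/**
 * Rewrite a group-relative surface index source into the actual binding
 * table index, adding the group's base offset for indirect accesses.
 */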
738 static void
739 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
740 nir_instr *instr, nir_src *src,
741 enum iris_surface_group group)
742 {
743 assert(bt->sizes[group] > 0);
744
745 b->cursor = nir_before_instr(instr);
746 nir_ssa_def *bti;
747 if (nir_src_is_const(*src)) {
748 uint32_t index = nir_src_as_uint(*src);
749 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
750 src->ssa->bit_size);
751 } else {
752 /* Indirect usage makes all the surfaces of the group available,
753 * so we can just add the base.
754 */
755 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
756 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
757 }
758 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
759 }
760
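/**
 * Mark the surface(s) referenced by @src as used within @group; an
 * indirect index marks the entire group as used.
 */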
761 static void
762 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
763 enum iris_surface_group group)
764 {
765 assert(bt->sizes[group] > 0);
766
767 if (nir_src_is_const(*src)) {
768 uint64_t index = nir_src_as_uint(*src);
769 assert(index < bt->sizes[group]);
770 bt->used_mask[group] |= 1ull << index;
771 } else {
772 /* There's an indirect usage, we need all the surfaces. */
773 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
774 }
775 }
776
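/**
 * Check INTEL_DISABLE_COMPACT_BINDING_TABLE, which disables binding table
 * compaction (every slot in every group is then marked as used).
 */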
777 static bool
778 skip_compacting_binding_tables(void)
779 {
780 static int skip = -1;
781 if (skip < 0)
782 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
783 return skip;
784 }
785
786 /**
787 * Set up the binding table indices and apply to the shader.
788 */
789 static void
790 iris_setup_binding_table(const struct gen_device_info *devinfo,
791 struct nir_shader *nir,
792 struct iris_binding_table *bt,
793 unsigned num_render_targets,
794 unsigned num_system_values,
795 unsigned num_cbufs)
796 {
797 const struct shader_info *info = &nir->info;
798
799 memset(bt, 0, sizeof(*bt));
800
801 /* Set the sizes for each surface group. For some groups, we already know
802 * upfront how many will be used, so mark them.
803 */
804 if (info->stage == MESA_SHADER_FRAGMENT) {
805 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
806 /* All render targets used. */
807 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
808 BITFIELD64_MASK(num_render_targets);
809
810 /* Set up the render target read surface group in order to support
811 * non-coherent framebuffer fetch on Gen8.
812 */
813 if (devinfo->gen == 8 && info->outputs_read) {
814 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
815 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
816 BITFIELD64_MASK(num_render_targets);
817 }
818 } else if (info->stage == MESA_SHADER_COMPUTE) {
819 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
820 }
821
822 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
823 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
824
825 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
826
827 /* Allocate an extra slot in the UBO section for NIR constants.
828 * Binding table compaction will remove it if unnecessary.
829 *
830 * We don't include them in iris_compiled_shader::num_cbufs because
831 * they are uploaded separately from shs->constbuf[], but from a shader
832 * point of view, they're another UBO (at the end of the section).
833 */
834 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
835
836 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
837
838 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
839 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
840
841 /* Mark surfaces used for the cases where we don't have the information
842 * available upfront.
843 */
844 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
845 nir_foreach_block (block, impl) {
846 nir_foreach_instr (instr, block) {
847 if (instr->type != nir_instr_type_intrinsic)
848 continue;
849
850 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
851 switch (intrin->intrinsic) {
852 case nir_intrinsic_load_num_work_groups:
853 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
854 break;
855
856 case nir_intrinsic_load_output:
857 if (devinfo->gen == 8) {
858 mark_used_with_src(bt, &intrin->src[0],
859 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
860 }
861 break;
862
863 case nir_intrinsic_image_size:
864 case nir_intrinsic_image_load:
865 case nir_intrinsic_image_store:
866 case nir_intrinsic_image_atomic_add:
867 case nir_intrinsic_image_atomic_imin:
868 case nir_intrinsic_image_atomic_umin:
869 case nir_intrinsic_image_atomic_imax:
870 case nir_intrinsic_image_atomic_umax:
871 case nir_intrinsic_image_atomic_and:
872 case nir_intrinsic_image_atomic_or:
873 case nir_intrinsic_image_atomic_xor:
874 case nir_intrinsic_image_atomic_exchange:
875 case nir_intrinsic_image_atomic_comp_swap:
876 case nir_intrinsic_image_load_raw_intel:
877 case nir_intrinsic_image_store_raw_intel:
878 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
879 break;
880
881 case nir_intrinsic_load_ubo:
882 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
883 break;
884
885 case nir_intrinsic_store_ssbo:
886 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
887 break;
888
889 case nir_intrinsic_get_buffer_size:
890 case nir_intrinsic_ssbo_atomic_add:
891 case nir_intrinsic_ssbo_atomic_imin:
892 case nir_intrinsic_ssbo_atomic_umin:
893 case nir_intrinsic_ssbo_atomic_imax:
894 case nir_intrinsic_ssbo_atomic_umax:
895 case nir_intrinsic_ssbo_atomic_and:
896 case nir_intrinsic_ssbo_atomic_or:
897 case nir_intrinsic_ssbo_atomic_xor:
898 case nir_intrinsic_ssbo_atomic_exchange:
899 case nir_intrinsic_ssbo_atomic_comp_swap:
900 case nir_intrinsic_ssbo_atomic_fmin:
901 case nir_intrinsic_ssbo_atomic_fmax:
902 case nir_intrinsic_ssbo_atomic_fcomp_swap:
903 case nir_intrinsic_load_ssbo:
904 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
905 break;
906
907 default:
908 break;
909 }
910 }
911 }
912
913 /* When disabled, we just mark everything as used. */
914 if (unlikely(skip_compacting_binding_tables())) {
915 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
916 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
917 }
918
919 /* Calculate the offsets and the binding table size based on the used
920 * surfaces. After this point, the functions to go between "group indices"
921 * and binding table indices can be used.
922 */
923 uint32_t next = 0;
924 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
925 if (bt->used_mask[i] != 0) {
926 bt->offsets[i] = next;
927 next += util_bitcount64(bt->used_mask[i]);
928 }
929 }
930 bt->size_bytes = next * 4;
931
932 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
933 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
934 }
935
936 /* Apply the binding table indices. The backend compiler is not expected
937 * to change those, as we haven't set any of the *_start entries in brw
938 * binding_table.
939 */
940 nir_builder b;
941 nir_builder_init(&b, impl);
942
943 nir_foreach_block (block, impl) {
944 nir_foreach_instr (instr, block) {
945 if (instr->type == nir_instr_type_tex) {
946 nir_tex_instr *tex = nir_instr_as_tex(instr);
947 tex->texture_index =
948 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
949 tex->texture_index);
950 continue;
951 }
952
953 if (instr->type != nir_instr_type_intrinsic)
954 continue;
955
956 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
957 switch (intrin->intrinsic) {
958 case nir_intrinsic_image_size:
959 case nir_intrinsic_image_load:
960 case nir_intrinsic_image_store:
961 case nir_intrinsic_image_atomic_add:
962 case nir_intrinsic_image_atomic_imin:
963 case nir_intrinsic_image_atomic_umin:
964 case nir_intrinsic_image_atomic_imax:
965 case nir_intrinsic_image_atomic_umax:
966 case nir_intrinsic_image_atomic_and:
967 case nir_intrinsic_image_atomic_or:
968 case nir_intrinsic_image_atomic_xor:
969 case nir_intrinsic_image_atomic_exchange:
970 case nir_intrinsic_image_atomic_comp_swap:
971 case nir_intrinsic_image_load_raw_intel:
972 case nir_intrinsic_image_store_raw_intel:
973 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
974 IRIS_SURFACE_GROUP_IMAGE);
975 break;
976
977 case nir_intrinsic_load_ubo:
978 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
979 IRIS_SURFACE_GROUP_UBO);
980 break;
981
982 case nir_intrinsic_store_ssbo:
983 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
984 IRIS_SURFACE_GROUP_SSBO);
985 break;
986
987 case nir_intrinsic_load_output:
988 if (devinfo->gen == 8) {
989 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
990 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
991 }
992 break;
993
994 case nir_intrinsic_get_buffer_size:
995 case nir_intrinsic_ssbo_atomic_add:
996 case nir_intrinsic_ssbo_atomic_imin:
997 case nir_intrinsic_ssbo_atomic_umin:
998 case nir_intrinsic_ssbo_atomic_imax:
999 case nir_intrinsic_ssbo_atomic_umax:
1000 case nir_intrinsic_ssbo_atomic_and:
1001 case nir_intrinsic_ssbo_atomic_or:
1002 case nir_intrinsic_ssbo_atomic_xor:
1003 case nir_intrinsic_ssbo_atomic_exchange:
1004 case nir_intrinsic_ssbo_atomic_comp_swap:
1005 case nir_intrinsic_ssbo_atomic_fmin:
1006 case nir_intrinsic_ssbo_atomic_fmax:
1007 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1008 case nir_intrinsic_load_ssbo:
1009 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1010 IRIS_SURFACE_GROUP_SSBO);
1011 break;
1012
1013 default:
1014 break;
1015 }
1016 }
1017 }
1018 }
1019
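/**
 * Log a shader recompile through the compiler's shader_perf_log and report
 * which key fields differ from the previous compile of this program.
 */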
1020 static void
1021 iris_debug_recompile(struct iris_context *ice,
1022 struct shader_info *info,
1023 const struct brw_base_prog_key *key)
1024 {
1025 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1026 const struct gen_device_info *devinfo = &screen->devinfo;
1027 const struct brw_compiler *c = screen->compiler;
1028
1029 if (!info)
1030 return;
1031
1032 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1033 _mesa_shader_stage_to_string(info->stage),
1034 info->name ? info->name : "(no identifier)",
1035 info->label ? info->label : "");
1036
1037 const void *old_iris_key =
1038 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1039
1040 union brw_any_prog_key old_key;
1041
1042 switch (info->stage) {
1043 case MESA_SHADER_VERTEX:
1044 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1045 break;
1046 case MESA_SHADER_TESS_CTRL:
1047 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1048 break;
1049 case MESA_SHADER_TESS_EVAL:
1050 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1051 break;
1052 case MESA_SHADER_GEOMETRY:
1053 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1054 break;
1055 case MESA_SHADER_FRAGMENT:
1056 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1057 break;
1058 case MESA_SHADER_COMPUTE:
1059 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1060 break;
1061 default:
1062 unreachable("invalid shader stage");
1063 }
1064
1065 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1066 }
1067
1068 /**
1069 * Get the shader for the last enabled geometry stage.
1070 *
1071 * This stage is the one which will feed stream output and the rasterizer.
1072 */
1073 static gl_shader_stage
1074 last_vue_stage(struct iris_context *ice)
1075 {
1076 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1077 return MESA_SHADER_GEOMETRY;
1078
1079 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1080 return MESA_SHADER_TESS_EVAL;
1081
1082 return MESA_SHADER_VERTEX;
1083 }
1084
1085 /**
1086 * Compile a vertex shader, and upload the assembly.
1087 */
1088 static struct iris_compiled_shader *
1089 iris_compile_vs(struct iris_context *ice,
1090 struct iris_uncompiled_shader *ish,
1091 const struct iris_vs_prog_key *key)
1092 {
1093 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1094 const struct brw_compiler *compiler = screen->compiler;
1095 const struct gen_device_info *devinfo = &screen->devinfo;
1096 void *mem_ctx = ralloc_context(NULL);
1097 struct brw_vs_prog_data *vs_prog_data =
1098 rzalloc(mem_ctx, struct brw_vs_prog_data);
1099 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1100 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1101 enum brw_param_builtin *system_values;
1102 unsigned num_system_values;
1103 unsigned num_cbufs;
1104
1105 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1106
1107 if (key->vue.nr_userclip_plane_consts) {
1108 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1109 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1110 true, false, NULL);
1111 nir_lower_io_to_temporaries(nir, impl, true, false);
1112 nir_lower_global_vars_to_local(nir);
1113 nir_lower_vars_to_ssa(nir);
1114 nir_shader_gather_info(nir, impl);
1115 }
1116
1117 prog_data->use_alt_mode = ish->use_alt_mode;
1118
1119 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1120 &num_system_values, &num_cbufs);
1121
1122 struct iris_binding_table bt;
1123 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1124 num_system_values, num_cbufs);
1125
1126 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1127
1128 brw_compute_vue_map(devinfo,
1129 &vue_prog_data->vue_map, nir->info.outputs_written,
1130 nir->info.separate_shader, /* pos_slots */ 1);
1131
1132 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1133
1134 char *error_str = NULL;
1135 const unsigned *program =
1136 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1137 nir, -1, NULL, &error_str);
1138 if (program == NULL) {
1139 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1140 ralloc_free(mem_ctx);
1141 return NULL;
1142 }
1143
1144 if (ish->compiled_once) {
1145 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1146 } else {
1147 ish->compiled_once = true;
1148 }
1149
1150 uint32_t *so_decls =
1151 screen->vtbl.create_so_decl_list(&ish->stream_output,
1152 &vue_prog_data->vue_map);
1153
1154 struct iris_compiled_shader *shader =
1155 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1156 prog_data, so_decls, system_values, num_system_values,
1157 0, num_cbufs, &bt);
1158
1159 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1160
1161 ralloc_free(mem_ctx);
1162 return shader;
1163 }
1164
1165 /**
1166 * Update the current vertex shader variant.
1167 *
1168 * Fill out the key, look in the cache, compile and bind if needed.
1169 */
1170 static void
1171 iris_update_compiled_vs(struct iris_context *ice)
1172 {
1173 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1174 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1175 struct iris_uncompiled_shader *ish =
1176 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1177
1178 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1179 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1180
1181 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1182 struct iris_compiled_shader *shader =
1183 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1184
1185 if (!shader)
1186 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1187
1188 if (!shader)
1189 shader = iris_compile_vs(ice, ish, &key);
1190
1191 if (old != shader) {
1192 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1193 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1194 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1195 IRIS_STAGE_DIRTY_BINDINGS_VS |
1196 IRIS_STAGE_DIRTY_CONSTANTS_VS;
1197 shs->sysvals_need_upload = true;
1198
1199 const struct brw_vs_prog_data *vs_prog_data =
1200 (void *) shader->prog_data;
1201 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1202 vs_prog_data->uses_baseinstance;
1203 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1204 vs_prog_data->uses_is_indexed_draw;
1205 const bool needs_sgvs_element = uses_draw_params ||
1206 vs_prog_data->uses_instanceid ||
1207 vs_prog_data->uses_vertexid;
1208
1209 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1210 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1211 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1212 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1213 IRIS_DIRTY_VERTEX_ELEMENTS;
1214 }
1215 ice->state.vs_uses_draw_params = uses_draw_params;
1216 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1217 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1218 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1219 }
1220 }
1221
1222 /**
1223 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1224 */
1225 const struct shader_info *
1226 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1227 {
1228 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1229
1230 if (!ish)
1231 return NULL;
1232
1233 const nir_shader *nir = ish->nir;
1234 return &nir->info;
1235 }
1236
1237 /**
1238 * Get the union of TCS output and TES input slots.
1239 *
1240 * TCS and TES need to agree on a common URB entry layout. In particular,
1241 * the data for all patch vertices is stored in a single URB entry (unlike
1242 * GS which has one entry per input vertex). This means that per-vertex
1243 * array indexing needs a stride.
1244 *
1245 * SSO requires locations to match, but doesn't require the number of
1246 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1247 * So, we need to take the extra step of unifying these on the fly.
1248 */
1249 static void
1250 get_unified_tess_slots(const struct iris_context *ice,
1251 uint64_t *per_vertex_slots,
1252 uint32_t *per_patch_slots)
1253 {
1254 const struct shader_info *tcs =
1255 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1256 const struct shader_info *tes =
1257 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1258
1259 *per_vertex_slots = tes->inputs_read;
1260 *per_patch_slots = tes->patch_inputs_read;
1261
1262 if (tcs) {
1263 *per_vertex_slots |= tcs->outputs_written;
1264 *per_patch_slots |= tcs->patch_outputs_written;
1265 }
1266 }
1267
1268 /**
1269 * Compile a tessellation control shader, and upload the assembly.
1270 */
1271 static struct iris_compiled_shader *
1272 iris_compile_tcs(struct iris_context *ice,
1273 struct iris_uncompiled_shader *ish,
1274 const struct iris_tcs_prog_key *key)
1275 {
1276 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1277 const struct brw_compiler *compiler = screen->compiler;
1278 const struct nir_shader_compiler_options *options =
1279 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1280 void *mem_ctx = ralloc_context(NULL);
1281 struct brw_tcs_prog_data *tcs_prog_data =
1282 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1283 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1284 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1285 const struct gen_device_info *devinfo = &screen->devinfo;
1286 enum brw_param_builtin *system_values = NULL;
1287 unsigned num_system_values = 0;
1288 unsigned num_cbufs = 0;
1289
1290 nir_shader *nir;
1291
1292 struct iris_binding_table bt;
1293
1294 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1295
1296 if (ish) {
1297 nir = nir_shader_clone(mem_ctx, ish->nir);
1298
1299 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1300 &num_system_values, &num_cbufs);
1301 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1302 num_system_values, num_cbufs);
1303 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1304 } else {
1305 nir =
1306 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1307
1308 /* Reserve space for passing the default tess levels as constants. */
1309 num_cbufs = 1;
1310 num_system_values = 8;
1311 system_values =
1312 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1313 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1314 prog_data->nr_params = num_system_values;
1315
1316 if (key->tes_primitive_mode == GL_QUADS) {
1317 for (int i = 0; i < 4; i++)
1318 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1319
1320 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1321 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1322 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1323 for (int i = 0; i < 3; i++)
1324 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1325
1326 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1327 } else {
1328 assert(key->tes_primitive_mode == GL_ISOLINES);
1329 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1330 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1331 }
1332
1333 /* Manually setup the TCS binding table. */
1334 memset(&bt, 0, sizeof(bt));
1335 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1336 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1337 bt.size_bytes = 4;
1338
1339 prog_data->ubo_ranges[0].length = 1;
1340 }
1341
1342 char *error_str = NULL;
1343 const unsigned *program =
1344 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1345 nir, -1, NULL, &error_str);
1346 if (program == NULL) {
1347 dbg_printf("Failed to compile control shader: %s\n", error_str);
1348 ralloc_free(mem_ctx);
1349 return NULL;
1350 }
1351
1352 if (ish) {
1353 if (ish->compiled_once) {
1354 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1355 } else {
1356 ish->compiled_once = true;
1357 }
1358 }
1359
1360 struct iris_compiled_shader *shader =
1361 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1362 prog_data, NULL, system_values, num_system_values,
1363 0, num_cbufs, &bt);
1364
1365 if (ish)
1366 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1367
1368 ralloc_free(mem_ctx);
1369 return shader;
1370 }
1371
1372 /**
1373 * Update the current tessellation control shader variant.
1374 *
1375 * Fill out the key, look in the cache, compile and bind if needed.
1376 */
1377 static void
1378 iris_update_compiled_tcs(struct iris_context *ice)
1379 {
1380 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1381 struct iris_uncompiled_shader *tcs =
1382 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1383 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1384 const struct brw_compiler *compiler = screen->compiler;
1385 const struct gen_device_info *devinfo = &screen->devinfo;
1386
1387 const struct shader_info *tes_info =
1388 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1389 struct iris_tcs_prog_key key = {
1390 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1391 .tes_primitive_mode = tes_info->tess.primitive_mode,
1392 .input_vertices =
1393 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1394 .quads_workaround = devinfo->gen < 9 &&
1395 tes_info->tess.primitive_mode == GL_QUADS &&
1396 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1397 };
1398 get_unified_tess_slots(ice, &key.outputs_written,
1399 &key.patch_outputs_written);
1400 screen->vtbl.populate_tcs_key(ice, &key);
1401
1402 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1403 struct iris_compiled_shader *shader =
1404 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1405
1406 if (tcs && !shader)
1407 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1408
1409 if (!shader)
1410 shader = iris_compile_tcs(ice, tcs, &key);
1411
1412 if (old != shader) {
1413 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1414 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1415 IRIS_STAGE_DIRTY_BINDINGS_TCS |
1416 IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1417 shs->sysvals_need_upload = true;
1418 }
1419 }
1420
1421 /**
1422 * Compile a tessellation evaluation shader, and upload the assembly.
1423 */
1424 static struct iris_compiled_shader *
1425 iris_compile_tes(struct iris_context *ice,
1426 struct iris_uncompiled_shader *ish,
1427 const struct iris_tes_prog_key *key)
1428 {
1429 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1430 const struct brw_compiler *compiler = screen->compiler;
1431 void *mem_ctx = ralloc_context(NULL);
1432 struct brw_tes_prog_data *tes_prog_data =
1433 rzalloc(mem_ctx, struct brw_tes_prog_data);
1434 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1435 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1436 enum brw_param_builtin *system_values;
1437 const struct gen_device_info *devinfo = &screen->devinfo;
1438 unsigned num_system_values;
1439 unsigned num_cbufs;
1440
1441 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1442
1443 if (key->vue.nr_userclip_plane_consts) {
1444 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1445 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1446 true, false, NULL);
1447 nir_lower_io_to_temporaries(nir, impl, true, false);
1448 nir_lower_global_vars_to_local(nir);
1449 nir_lower_vars_to_ssa(nir);
1450 nir_shader_gather_info(nir, impl);
1451 }
1452
1453 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1454 &num_system_values, &num_cbufs);
1455
1456 struct iris_binding_table bt;
1457 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1458 num_system_values, num_cbufs);
1459
1460 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1461
1462 struct brw_vue_map input_vue_map;
1463 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1464 key->patch_inputs_read);
1465
1466 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1467
1468 char *error_str = NULL;
1469 const unsigned *program =
1470 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1471 tes_prog_data, nir, -1, NULL, &error_str);
1472 if (program == NULL) {
1473 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1474 ralloc_free(mem_ctx);
1475 return NULL;
1476 }
1477
1478 if (ish->compiled_once) {
1479 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1480 } else {
1481 ish->compiled_once = true;
1482 }
1483
1484 uint32_t *so_decls =
1485 screen->vtbl.create_so_decl_list(&ish->stream_output,
1486 &vue_prog_data->vue_map);
1487
1488
1489 struct iris_compiled_shader *shader =
1490 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1491 prog_data, so_decls, system_values, num_system_values,
1492 0, num_cbufs, &bt);
1493
1494 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1495
1496 ralloc_free(mem_ctx);
1497 return shader;
1498 }
1499
1500 /**
1501 * Update the current tessellation evaluation shader variant.
1502 *
1503 * Fill out the key, look in the cache, compile and bind if needed.
1504 */
1505 static void
1506 iris_update_compiled_tes(struct iris_context *ice)
1507 {
1508 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1509 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1510 struct iris_uncompiled_shader *ish =
1511 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1512
1513 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1514 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1515 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1516
1517 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1518 struct iris_compiled_shader *shader =
1519 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1520
1521 if (!shader)
1522 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1523
1524 if (!shader)
1525 shader = iris_compile_tes(ice, ish, &key);
1526
1527 if (old != shader) {
1528 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1529 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1530 IRIS_STAGE_DIRTY_BINDINGS_TES |
1531 IRIS_STAGE_DIRTY_CONSTANTS_TES;
1532 shs->sysvals_need_upload = true;
1533 }
1534
1535 /* TODO: Could compare and avoid flagging this. */
1536 const struct shader_info *tes_info = &ish->nir->info;
1537 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1538 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1539 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1540 }
1541 }
1542
1543 /**
1544 * Compile a geometry shader, and upload the assembly.
1545 */
1546 static struct iris_compiled_shader *
1547 iris_compile_gs(struct iris_context *ice,
1548 struct iris_uncompiled_shader *ish,
1549 const struct iris_gs_prog_key *key)
1550 {
1551 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1552 const struct brw_compiler *compiler = screen->compiler;
1553 const struct gen_device_info *devinfo = &screen->devinfo;
1554 void *mem_ctx = ralloc_context(NULL);
1555 struct brw_gs_prog_data *gs_prog_data =
1556 rzalloc(mem_ctx, struct brw_gs_prog_data);
1557 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1558 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1559 enum brw_param_builtin *system_values;
1560 unsigned num_system_values;
1561 unsigned num_cbufs;
1562
1563 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1564
1565 if (key->vue.nr_userclip_plane_consts) {
1566 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1567 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1568 false, NULL);
1569 nir_lower_io_to_temporaries(nir, impl, true, false);
1570 nir_lower_global_vars_to_local(nir);
1571 nir_lower_vars_to_ssa(nir);
1572 nir_shader_gather_info(nir, impl);
1573 }
1574
1575 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1576 &num_system_values, &num_cbufs);
1577
1578 struct iris_binding_table bt;
1579 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1580 num_system_values, num_cbufs);
1581
1582 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1583
1584 brw_compute_vue_map(devinfo,
1585 &vue_prog_data->vue_map, nir->info.outputs_written,
1586 nir->info.separate_shader, /* pos_slots */ 1);
1587
1588 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1589
1590 char *error_str = NULL;
1591 const unsigned *program =
1592 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1593 nir, NULL, -1, NULL, &error_str);
1594 if (program == NULL) {
1595 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1596 ralloc_free(mem_ctx);
1597 return NULL;
1598 }
1599
1600 if (ish->compiled_once) {
1601 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1602 } else {
1603 ish->compiled_once = true;
1604 }
1605
1606 uint32_t *so_decls =
1607 screen->vtbl.create_so_decl_list(&ish->stream_output,
1608 &vue_prog_data->vue_map);
1609
1610 struct iris_compiled_shader *shader =
1611 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1612 prog_data, so_decls, system_values, num_system_values,
1613 0, num_cbufs, &bt);
1614
1615 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1616
1617 ralloc_free(mem_ctx);
1618 return shader;
1619 }
1620
1621 /**
1622 * Update the current geometry shader variant.
1623 *
1624 * Fill out the key, look in the cache, compile and bind if needed.
1625 */
1626 static void
1627 iris_update_compiled_gs(struct iris_context *ice)
1628 {
1629 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1630 struct iris_uncompiled_shader *ish =
1631 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1632 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1633 struct iris_compiled_shader *shader = NULL;
1634 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1635
1636 if (ish) {
1637 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1638 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1639
1640 shader =
1641 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1642
1643 if (!shader)
1644 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1645
1646 if (!shader)
1647 shader = iris_compile_gs(ice, ish, &key);
1648 }
1649
1650 if (old != shader) {
1651 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1652 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1653 IRIS_STAGE_DIRTY_BINDINGS_GS |
1654 IRIS_STAGE_DIRTY_CONSTANTS_GS;
1655 shs->sysvals_need_upload = true;
1656 }
1657 }
1658
1659 /**
1660 * Compile a fragment (pixel) shader, and upload the assembly.
1661 */
1662 static struct iris_compiled_shader *
1663 iris_compile_fs(struct iris_context *ice,
1664 struct iris_uncompiled_shader *ish,
1665 const struct iris_fs_prog_key *key,
1666 struct brw_vue_map *vue_map)
1667 {
1668 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1669 const struct brw_compiler *compiler = screen->compiler;
1670 void *mem_ctx = ralloc_context(NULL);
1671 struct brw_wm_prog_data *fs_prog_data =
1672 rzalloc(mem_ctx, struct brw_wm_prog_data);
1673 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1674 enum brw_param_builtin *system_values;
1675 const struct gen_device_info *devinfo = &screen->devinfo;
1676 unsigned num_system_values;
1677 unsigned num_cbufs;
1678
1679 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1680
1681 prog_data->use_alt_mode = ish->use_alt_mode;
1682
1683 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
1684 &num_system_values, &num_cbufs);
1685
1686 /* Lower output variables to load_output intrinsics before setting up
1687 * binding tables, so iris_setup_binding_table can map any load_output
1688 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1689 * non-coherent framebuffer fetches.
1690 */
1691 brw_nir_lower_fs_outputs(nir);
1692
1693 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1694 * and do not need a binding table entry with a null surface. Earlier
1695 * generations need an entry for a null surface.
1696 */
1697 int null_rts = devinfo->gen < 11 ? 1 : 0;
1698
1699 struct iris_binding_table bt;
1700 iris_setup_binding_table(devinfo, nir, &bt,
1701 MAX2(key->nr_color_regions, null_rts),
1702 num_system_values, num_cbufs);
1703
1704 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1705
1706 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1707
1708 char *error_str = NULL;
1709 const unsigned *program =
1710 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1711 nir, -1, -1, -1, true, false, vue_map,
1712 NULL, &error_str);
1713 if (program == NULL) {
1714 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1715 ralloc_free(mem_ctx);
1716 return false;
1717 }
1718
1719 if (ish->compiled_once) {
1720 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1721 } else {
1722 ish->compiled_once = true;
1723 }
1724
1725 struct iris_compiled_shader *shader =
1726 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1727 prog_data, NULL, system_values, num_system_values,
1728 0, num_cbufs, &bt);
1729
1730 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1731
1732 ralloc_free(mem_ctx);
1733 return shader;
1734 }
1735
1736 /**
1737 * Update the current fragment shader variant.
1738 *
1739 * Fill out the key, look in the cache, compile and bind if needed.
1740 */
1741 static void
1742 iris_update_compiled_fs(struct iris_context *ice)
1743 {
1744 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1745 struct iris_uncompiled_shader *ish =
1746 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1747 struct iris_fs_prog_key key = { KEY_ID(base) };
1748 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1749 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1750
1751 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1752 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1753
1754 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1755 struct iris_compiled_shader *shader =
1756 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1757
1758 if (!shader)
1759 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1760
1761 if (!shader)
1762 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1763
1764 if (old != shader) {
1765 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1766 // toggles. might be able to avoid flagging SBE too.
1767 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1768 ice->state.dirty |= IRIS_DIRTY_WM |
1769 IRIS_DIRTY_CLIP |
1770 IRIS_DIRTY_SBE;
1771 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1772 IRIS_STAGE_DIRTY_BINDINGS_FS |
1773 IRIS_STAGE_DIRTY_CONSTANTS_FS;
1774 shs->sysvals_need_upload = true;
1775 }
1776 }
1777
1778 /**
1779 * Update the last enabled stage's VUE map.
1780 *
1781  * When the output interface of the shader feeding the rasterizer changes,
1782  * we need to re-emit various packets.
1783 */
1784 static void
1785 update_last_vue_map(struct iris_context *ice,
1786 struct brw_stage_prog_data *prog_data)
1787 {
1788 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1789 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1790 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1791 const uint64_t changed_slots =
1792 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1793
1794 if (changed_slots & VARYING_BIT_VIEWPORT) {
1795 ice->state.num_viewports =
1796 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1797 ice->state.dirty |= IRIS_DIRTY_CLIP |
1798 IRIS_DIRTY_SF_CL_VIEWPORT |
1799 IRIS_DIRTY_CC_VIEWPORT |
1800 IRIS_DIRTY_SCISSOR_RECT;
1801 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1802 ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1803 }
1804
1805 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1806 ice->state.dirty |= IRIS_DIRTY_SBE;
1807 }
1808
1809 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1810 }
1811
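/**
 * Ensure pull-constant (UBO) surface state descriptors exist for the given
 * stage, flagging its bindings dirty if any new descriptors were created.
 */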
1812 static void
1813 iris_update_pull_constant_descriptors(struct iris_context *ice,
1814 gl_shader_stage stage)
1815 {
1816 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1817
1818 if (!shader || !shader->prog_data->has_ubo_pull)
1819 return;
1820
1821 struct iris_shader_state *shs = &ice->state.shaders[stage];
1822 bool any_new_descriptors =
1823 shader->num_system_values > 0 && shs->sysvals_need_upload;
1824
1825 unsigned bound_cbufs = shs->bound_cbufs;
1826
1827 while (bound_cbufs) {
1828 const int i = u_bit_scan(&bound_cbufs);
1829 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1830 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1831 if (!surf_state->res && cbuf->buffer) {
1832 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1833 any_new_descriptors = true;
1834 }
1835 }
1836
1837 if (any_new_descriptors)
1838 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1839 }
1840
1841 /**
1842 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1843 */
1844 static struct brw_vue_prog_data *
1845 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1846 {
1847 if (!ice->shaders.prog[stage])
1848 return NULL;
1849
1850 return (void *) ice->shaders.prog[stage]->prog_data;
1851 }
1852
1853 /**
1854 * Update the current shader variants for the given state.
1855 *
1856 * This should be called on every draw call to ensure that the correct
1857 * shaders are bound. It will also flag any dirty state triggered by
1858 * swapping out those shaders.
1859 */
1860 void
1861 iris_update_compiled_shaders(struct iris_context *ice)
1862 {
1863 const uint64_t dirty = ice->state.dirty;
1864 const uint64_t stage_dirty = ice->state.stage_dirty;
1865
1866 struct brw_vue_prog_data *old_prog_datas[4];
1867 if (!(dirty & IRIS_DIRTY_URB)) {
1868 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1869 old_prog_datas[i] = get_vue_prog_data(ice, i);
1870 }
1871
1872 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1873 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1874 struct iris_uncompiled_shader *tes =
1875 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1876 if (tes) {
1877 iris_update_compiled_tcs(ice);
1878 iris_update_compiled_tes(ice);
1879 } else {
1880 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1881 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1882 ice->state.stage_dirty |=
1883 IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1884 IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1885 IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1886 }
1887 }
1888
1889 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1890 iris_update_compiled_vs(ice);
1891 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1892 iris_update_compiled_gs(ice);
1893
1894 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1895 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1896 const struct iris_compiled_shader *gs =
1897 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1898 const struct iris_compiled_shader *tes =
1899 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1900
1901 bool points_or_lines = false;
1902
1903 if (gs) {
1904 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1905 points_or_lines =
1906 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1907 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1908 } else if (tes) {
1909 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1910 points_or_lines =
1911 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1912 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1913 }
1914
1915 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1916 /* Outbound to XY Clip enables */
1917 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1918 ice->state.dirty |= IRIS_DIRTY_CLIP;
1919 }
1920 }
1921
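   /* The VUE map and stream output configuration come from the last
    * enabled stage before the rasterizer (VS, TES, or GS).
    */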
1922 gl_shader_stage last_stage = last_vue_stage(ice);
1923 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1924 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1925 update_last_vue_map(ice, shader->prog_data);
1926 if (ice->state.streamout != shader->streamout) {
1927 ice->state.streamout = shader->streamout;
1928 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1929 }
1930
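   /* Refresh the stream output buffer strides from the bound shader's
    * stream output info (stored in dwords, converted to bytes here).
    */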
1931 if (ice->state.streamout_active) {
1932 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1933 struct iris_stream_output_target *so =
1934 (void *) ice->state.so_target[i];
1935 if (so)
1936 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1937 }
1938 }
1939
1940 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
1941 iris_update_compiled_fs(ice);
1942
1943    /* Changing shader interfaces may require a URB reconfiguration. */
1944 if (!(dirty & IRIS_DIRTY_URB)) {
1945 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1946 struct brw_vue_prog_data *old = old_prog_datas[i];
1947 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1948 if (!!old != !!new ||
1949 (new && new->urb_entry_size != old->urb_entry_size)) {
1950 ice->state.dirty |= IRIS_DIRTY_URB;
1951 break;
1952 }
1953 }
1954 }
1955
1956 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1957 if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
1958 iris_update_pull_constant_descriptors(ice, i);
1959 }
1960 }
1961
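/**
 * Compile a compute shader, and upload the assembly.
 */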
1962 static struct iris_compiled_shader *
1963 iris_compile_cs(struct iris_context *ice,
1964 struct iris_uncompiled_shader *ish,
1965 const struct iris_cs_prog_key *key)
1966 {
1967 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1968 const struct brw_compiler *compiler = screen->compiler;
1969 void *mem_ctx = ralloc_context(NULL);
1970 struct brw_cs_prog_data *cs_prog_data =
1971 rzalloc(mem_ctx, struct brw_cs_prog_data);
1972 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1973 enum brw_param_builtin *system_values;
1974 const struct gen_device_info *devinfo = &screen->devinfo;
1975 unsigned num_system_values;
1976 unsigned num_cbufs;
1977
1978 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1979
1980 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1981
1982 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data,
1983 ish->kernel_input_size,
1984 &system_values, &num_system_values, &num_cbufs);
1985
1986 struct iris_binding_table bt;
1987 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1988 num_system_values, num_cbufs);
1989
1990 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1991
1992 char *error_str = NULL;
1993 const unsigned *program =
1994 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1995 nir, -1, NULL, &error_str);
1996 if (program == NULL) {
1997 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1998 ralloc_free(mem_ctx);
1999 return false;
2000 }
2001
2002 if (ish->compiled_once) {
2003 iris_debug_recompile(ice, &nir->info, &brw_key.base);
2004 } else {
2005 ish->compiled_once = true;
2006 }
2007
2008 struct iris_compiled_shader *shader =
2009 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
2010 prog_data, NULL, system_values, num_system_values,
2011 ish->kernel_input_size, num_cbufs, &bt);
2012
2013 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
2014
2015 ralloc_free(mem_ctx);
2016 return shader;
2017 }
2018
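/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */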
2019 static void
2020 iris_update_compiled_cs(struct iris_context *ice)
2021 {
2022 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2023 struct iris_uncompiled_shader *ish =
2024 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2025
2026 struct iris_cs_prog_key key = { KEY_ID(base) };
2027 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2028 screen->vtbl.populate_cs_key(ice, &key);
2029
2030 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2031 struct iris_compiled_shader *shader =
2032 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2033
2034 if (!shader)
2035 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2036
2037 if (!shader)
2038 shader = iris_compile_cs(ice, ish, &key);
2039
2040 if (old != shader) {
2041 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2042 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2043 IRIS_STAGE_DIRTY_BINDINGS_CS |
2044 IRIS_STAGE_DIRTY_CONSTANTS_CS;
2045 shs->sysvals_need_upload = true;
2046 }
2047 }
2048
2049 void
2050 iris_update_compiled_compute_shader(struct iris_context *ice)
2051 {
2052 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2053 iris_update_compiled_cs(ice);
2054
2055 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2056 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2057 }
2058
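/**
 * Fill the push constant buffer with the subgroup ID for each thread.
 *
 * The asserts below require the push data to consist of exactly one
 * per-thread dword (the subgroup ID) and no cross-thread data; each
 * thread's data starts on a new 8-dword (one register) boundary, so e.g.
 * threads 0, 1, and 2 land at dwords 0, 8, and 16.
 */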
2059 void
2060 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2061 unsigned threads,
2062 uint32_t *dst)
2063 {
2064 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2065 assert(cs_prog_data->push.cross_thread.size == 0);
2066 assert(cs_prog_data->push.per_thread.dwords == 1);
2067 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2068 for (unsigned t = 0; t < threads; t++)
2069 dst[8 * t] = t;
2070 }
2071
2072 /**
2073 * Allocate scratch BOs as needed for the given per-thread size and stage.
2074 */
2075 struct iris_bo *
2076 iris_get_scratch_space(struct iris_context *ice,
2077 unsigned per_thread_scratch,
2078 gl_shader_stage stage)
2079 {
2080 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2081 struct iris_bufmgr *bufmgr = screen->bufmgr;
2082 const struct gen_device_info *devinfo = &screen->devinfo;
2083
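   /* per_thread_scratch is assumed to be a power-of-two size of at least
    * 1KB, so this maps 1KB -> 0, 2KB -> 1, 4KB -> 2, and so on, indexing
    * the per-size scratch BO cache below.
    */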
2084 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2085 assert(encoded_size < (1 << 16));
2086
2087 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2088
2089 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2090 *
2091 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2092 * must allocate scratch space enough so that each slice has 4
2093 * slices allowed."
2094 *
2095 * According to the other driver team, this applies to compute shaders
2096 * as well. This is not currently documented at all.
2097 *
2098 * This hack is no longer necessary on Gen11+.
2099 *
2100  * For Gen11+, scratch space allocation is based on the number of threads
2101 * in the base configuration.
2102 */
2103 unsigned subslice_total = screen->subslice_total;
2104 if (devinfo->gen >= 12)
2105 subslice_total = devinfo->num_subslices[0];
2106 else if (devinfo->gen == 11)
2107 subslice_total = 8;
2108 else if (devinfo->gen < 11)
2109 subslice_total = 4 * devinfo->num_slices;
2110 assert(subslice_total >= screen->subslice_total);
2111
2112 if (!*bop) {
2113 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2114
2115 if (devinfo->gen >= 12) {
2116 /* Same as ICL below, but with 16 EUs. */
2117 scratch_ids_per_subslice = 16 * 8;
2118 } else if (devinfo->gen == 11) {
2119 /* The MEDIA_VFE_STATE docs say:
2120 *
2121 * "Starting with this configuration, the Maximum Number of
2122 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2123 *
2124 * Although there are only 7 threads per EU in the configuration,
2125 * the FFTID is calculated as if there are 8 threads per EU,
2126 * which in turn requires a larger amount of Scratch Space to be
2127 * allocated by the driver."
2128 */
2129 scratch_ids_per_subslice = 8 * 8;
2130 }
2131
2132 uint32_t max_threads[] = {
2133 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2134 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2135 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2136 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2137 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2138 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2139 };
2140
2141 uint32_t size = per_thread_scratch * max_threads[stage];
2142
2143 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2144 }
2145
2146 return *bop;
2147 }
2148
2149 /* ------------------------------------------------------------------- */
2150
2151 /**
2152 * The pipe->create_[stage]_state() driver hooks.
2153 *
2154 * Performs basic NIR preprocessing, records any state dependencies, and
2155 * returns an iris_uncompiled_shader as the Gallium CSO.
2156 *
2157 * Actual shader compilation to assembly happens later, at first use.
2158 */
2159 static void *
2160 iris_create_uncompiled_shader(struct pipe_context *ctx,
2161 nir_shader *nir,
2162 const struct pipe_stream_output_info *so_info)
2163 {
2164 struct iris_context *ice = (void *)ctx;
2165 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2166 const struct gen_device_info *devinfo = &screen->devinfo;
2167
2168 struct iris_uncompiled_shader *ish =
2169 calloc(1, sizeof(struct iris_uncompiled_shader));
2170 if (!ish)
2171 return NULL;
2172
2173 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2174
2175 brw_preprocess_nir(screen->compiler, nir, NULL);
2176
2177 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2178 &ish->uses_atomic_load_store);
2179 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2180
2181 nir_sweep(nir);
2182
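   /* If the shader has embedded constant data, upload it to a buffer and
    * build a surface state so it can be bound when the shader runs.
    */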
2183 if (nir->constant_data_size > 0) {
2184 unsigned data_offset;
2185 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2186 32, nir->constant_data, &data_offset, &ish->const_data);
2187
2188 struct pipe_shader_buffer psb = {
2189 .buffer = ish->const_data,
2190 .buffer_offset = data_offset,
2191 .buffer_size = nir->constant_data_size,
2192 };
2193 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2194 }
2195
2196 ish->program_id = get_new_program_id(screen);
2197 ish->nir = nir;
2198 if (so_info) {
2199 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2200 update_so_info(&ish->stream_output, nir->info.outputs_written);
2201 }
2202
2203 /* Save this now before potentially dropping nir->info.name */
2204 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2205 ish->use_alt_mode = true;
2206
2207 if (screen->disk_cache) {
2208 /* Serialize the NIR to a binary blob that we can hash for the disk
2209 * cache. Drop unnecessary information (like variable names)
2210 * so the serialized NIR is smaller, and also to let us detect more
2211 * isomorphic shaders when hashing, increasing cache hits.
2212 */
2213 struct blob blob;
2214 blob_init(&blob);
2215 nir_serialize(&blob, nir, true);
2216 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2217 blob_finish(&blob);
2218 }
2219
2220 return ish;
2221 }
2222
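/**
 * Common helper for the create_[stage]_state hooks: translates TGSI to NIR
 * if necessary, then wraps it in an iris_uncompiled_shader.
 */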
2223 static struct iris_uncompiled_shader *
2224 iris_create_shader_state(struct pipe_context *ctx,
2225 const struct pipe_shader_state *state)
2226 {
2227 struct nir_shader *nir;
2228
2229 if (state->type == PIPE_SHADER_IR_TGSI)
2230 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2231 else
2232 nir = state->ir.nir;
2233
2234 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2235 }
2236
2237 static void *
2238 iris_create_vs_state(struct pipe_context *ctx,
2239 const struct pipe_shader_state *state)
2240 {
2241 struct iris_context *ice = (void *) ctx;
2242 struct iris_screen *screen = (void *) ctx->screen;
2243 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2244
2245 /* User clip planes */
2246 if (ish->nir->info.clip_distance_array_size == 0)
2247 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2248
2249 if (screen->precompile) {
2250 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2251
2252 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2253 iris_compile_vs(ice, ish, &key);
2254 }
2255
2256 return ish;
2257 }
2258
2259 static void *
2260 iris_create_tcs_state(struct pipe_context *ctx,
2261 const struct pipe_shader_state *state)
2262 {
2263 struct iris_context *ice = (void *) ctx;
2264 struct iris_screen *screen = (void *) ctx->screen;
2265 const struct brw_compiler *compiler = screen->compiler;
2266 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2267 struct shader_info *info = &ish->nir->info;
2268
2269 if (screen->precompile) {
2270 const unsigned _GL_TRIANGLES = 0x0004;
2271 struct iris_tcs_prog_key key = {
2272 KEY_ID(vue.base),
2273 // XXX: make sure the linker fills this out from the TES...
2274 .tes_primitive_mode =
2275 info->tess.primitive_mode ? info->tess.primitive_mode
2276 : _GL_TRIANGLES,
2277 .outputs_written = info->outputs_written,
2278 .patch_outputs_written = info->patch_outputs_written,
2279 };
2280
2281 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2282  * We don't have that information, so we optimistically guess that the input
2283 * and output patches are the same size. This is a bad guess, but we
2284 * can't do much better.
2285 */
2286 if (compiler->use_tcs_8_patch)
2287 key.input_vertices = info->tess.tcs_vertices_out;
2288
2289 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2290 iris_compile_tcs(ice, ish, &key);
2291 }
2292
2293 return ish;
2294 }
2295
2296 static void *
2297 iris_create_tes_state(struct pipe_context *ctx,
2298 const struct pipe_shader_state *state)
2299 {
2300 struct iris_context *ice = (void *) ctx;
2301 struct iris_screen *screen = (void *) ctx->screen;
2302 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2303 struct shader_info *info = &ish->nir->info;
2304
2305 /* User clip planes */
2306 if (ish->nir->info.clip_distance_array_size == 0)
2307 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2308
2309 if (screen->precompile) {
2310 struct iris_tes_prog_key key = {
2311 KEY_ID(vue.base),
2312 // XXX: not ideal, need TCS output/TES input unification
2313 .inputs_read = info->inputs_read,
2314 .patch_inputs_read = info->patch_inputs_read,
2315 };
2316
2317 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2318 iris_compile_tes(ice, ish, &key);
2319 }
2320
2321 return ish;
2322 }
2323
2324 static void *
2325 iris_create_gs_state(struct pipe_context *ctx,
2326 const struct pipe_shader_state *state)
2327 {
2328 struct iris_context *ice = (void *) ctx;
2329 struct iris_screen *screen = (void *) ctx->screen;
2330 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2331
2332 /* User clip planes */
2333 if (ish->nir->info.clip_distance_array_size == 0)
2334 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2335
2336 if (screen->precompile) {
2337 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2338
2339 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2340 iris_compile_gs(ice, ish, &key);
2341 }
2342
2343 return ish;
2344 }
2345
2346 static void *
2347 iris_create_fs_state(struct pipe_context *ctx,
2348 const struct pipe_shader_state *state)
2349 {
2350 struct iris_context *ice = (void *) ctx;
2351 struct iris_screen *screen = (void *) ctx->screen;
2352 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2353 struct shader_info *info = &ish->nir->info;
2354
2355 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2356 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2357 (1ull << IRIS_NOS_RASTERIZER) |
2358 (1ull << IRIS_NOS_BLEND);
2359
2360 /* The program key needs the VUE map if there are > 16 inputs */
2361 if (util_bitcount64(ish->nir->info.inputs_read &
2362 BRW_FS_VARYING_INPUT_MASK) > 16) {
2363 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2364 }
2365
2366 if (screen->precompile) {
2367 const uint64_t color_outputs = info->outputs_written &
2368 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2369 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2370 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2371
2372 bool can_rearrange_varyings =
2373 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2374
2375 const struct gen_device_info *devinfo = &screen->devinfo;
2376 struct iris_fs_prog_key key = {
2377 KEY_ID(base),
2378 .nr_color_regions = util_bitcount(color_outputs),
2379 .coherent_fb_fetch = devinfo->gen >= 9,
2380 .input_slots_valid =
2381 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2382 };
2383
2384 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2385 iris_compile_fs(ice, ish, &key, NULL);
2386 }
2387
2388 return ish;
2389 }
2390
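/**
 * The pipe->create_compute_state() driver hook.
 *
 * Accepts either NIR or serialized NIR, records the kernel input size,
 * and optionally precompiles a variant.
 */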
2391 static void *
2392 iris_create_compute_state(struct pipe_context *ctx,
2393 const struct pipe_compute_state *state)
2394 {
2395 struct iris_context *ice = (void *) ctx;
2396 struct iris_screen *screen = (void *) ctx->screen;
2397 const nir_shader_compiler_options *options =
2398 screen->compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions;
2399
2400 nir_shader *nir;
2401 switch (state->ir_type) {
2402 case PIPE_SHADER_IR_NIR:
2403 nir = (void *)state->prog;
2404 break;
2405
2406 case PIPE_SHADER_IR_NIR_SERIALIZED: {
2407 struct blob_reader reader;
2408 const struct pipe_binary_program_header *hdr = state->prog;
2409 blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
2410 nir = nir_deserialize(NULL, options, &reader);
2411 break;
2412 }
2413
2414 default:
2415 unreachable("Unsupported IR");
2416 }
2417
2418 struct iris_uncompiled_shader *ish =
2419 iris_create_uncompiled_shader(ctx, nir, NULL);
2420 ish->kernel_input_size = state->req_input_mem;
2421
2422 // XXX: disallow more than 64KB of shared variables
2423
2424 if (screen->precompile) {
2425 struct iris_cs_prog_key key = { KEY_ID(base) };
2426
2427 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2428 iris_compile_cs(ice, ish, &key);
2429 }
2430
2431 return ish;
2432 }
2433
2434 /**
2435 * The pipe->delete_[stage]_state() driver hooks.
2436 *
2437 * Frees the iris_uncompiled_shader.
2438 */
2439 static void
2440 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2441 {
2442 struct iris_uncompiled_shader *ish = state;
2443 struct iris_context *ice = (void *) ctx;
2444
2445 if (ice->shaders.uncompiled[stage] == ish) {
2446 ice->shaders.uncompiled[stage] = NULL;
2447 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2448 }
2449
2450 if (ish->const_data) {
2451 pipe_resource_reference(&ish->const_data, NULL);
2452 pipe_resource_reference(&ish->const_data_state.res, NULL);
2453 }
2454
2455 iris_delete_shader_variants(ice, ish);
2456
2457 ralloc_free(ish->nir);
2458 free(ish);
2459 }
2460
2461 static void
2462 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2463 {
2464 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2465 }
2466
2467 static void
2468 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2469 {
2470 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2471 }
2472
2473 static void
2474 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2475 {
2476 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2477 }
2478
2479 static void
2480 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2481 {
2482 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2483 }
2484
2485 static void
2486 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2487 {
2488 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2489 }
2490
2491 static void
2492 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2493 {
2494 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2495 }
2496
2497 /**
2498 * The pipe->bind_[stage]_state() driver hook.
2499 *
2500 * Binds an uncompiled shader as the current one for a particular stage.
2501 * Updates dirty tracking to account for the shader's NOS.
2502 */
2503 static void
2504 bind_shader_state(struct iris_context *ice,
2505 struct iris_uncompiled_shader *ish,
2506 gl_shader_stage stage)
2507 {
2508 uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2509 const uint64_t nos = ish ? ish->nos : 0;
2510
2511 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2512 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2513
2514 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2515 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2516 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2517 }
2518
2519 ice->shaders.uncompiled[stage] = ish;
2520 ice->state.stage_dirty |= stage_dirty_bit;
2521
2522    /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they
2523     * change (or that they no longer need to do so).
2524 */
2525 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2526 if (nos & (1 << i))
2527 ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2528 else
2529 ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2530 }
2531 }
2532
2533 static void
2534 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2535 {
2536 struct iris_context *ice = (struct iris_context *)ctx;
2537 struct iris_uncompiled_shader *new_ish = state;
2538
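   /* Switching between window-space and clip-space positions affects
    * clipping, rasterizer, and viewport state, so re-emit those packets.
    */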
2539 if (new_ish &&
2540 ice->state.window_space_position !=
2541 new_ish->nir->info.vs.window_space_position) {
2542 ice->state.window_space_position =
2543 new_ish->nir->info.vs.window_space_position;
2544
2545 ice->state.dirty |= IRIS_DIRTY_CLIP |
2546 IRIS_DIRTY_RASTER |
2547 IRIS_DIRTY_CC_VIEWPORT;
2548 }
2549
2550 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2551 }
2552
2553 static void
2554 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2555 {
2556 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2557 }
2558
2559 static void
2560 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2561 {
2562 struct iris_context *ice = (struct iris_context *)ctx;
2563
2564 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2565 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2566 ice->state.dirty |= IRIS_DIRTY_URB;
2567
2568 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2569 }
2570
2571 static void
2572 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2573 {
2574 struct iris_context *ice = (struct iris_context *)ctx;
2575
2576 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2577 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2578 ice->state.dirty |= IRIS_DIRTY_URB;
2579
2580 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2581 }
2582
2583 static void
2584 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2585 {
2586 struct iris_context *ice = (struct iris_context *) ctx;
2587 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2588 const struct gen_device_info *devinfo = &screen->devinfo;
2589 struct iris_uncompiled_shader *old_ish =
2590 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2591 struct iris_uncompiled_shader *new_ish = state;
2592
2593 const unsigned color_bits =
2594 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2595 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2596
2597 /* Fragment shader outputs influence HasWriteableRT */
2598 if (!old_ish || !new_ish ||
2599 (old_ish->nir->info.outputs_written & color_bits) !=
2600 (new_ish->nir->info.outputs_written & color_bits))
2601 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2602
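   /* The Gen8 PMA stall workaround depends on fragment shader state, so it
    * needs to be re-evaluated whenever the bound FS changes.
    */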
2603 if (devinfo->gen == 8)
2604 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2605
2606 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2607 }
2608
2609 static void
2610 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2611 {
2612 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2613 }
2614
2615 void
2616 iris_init_program_functions(struct pipe_context *ctx)
2617 {
2618 ctx->create_vs_state = iris_create_vs_state;
2619 ctx->create_tcs_state = iris_create_tcs_state;
2620 ctx->create_tes_state = iris_create_tes_state;
2621 ctx->create_gs_state = iris_create_gs_state;
2622 ctx->create_fs_state = iris_create_fs_state;
2623 ctx->create_compute_state = iris_create_compute_state;
2624
2625 ctx->delete_vs_state = iris_delete_vs_state;
2626 ctx->delete_tcs_state = iris_delete_tcs_state;
2627 ctx->delete_tes_state = iris_delete_tes_state;
2628 ctx->delete_gs_state = iris_delete_gs_state;
2629 ctx->delete_fs_state = iris_delete_fs_state;
2630 ctx->delete_compute_state = iris_delete_cs_state;
2631
2632 ctx->bind_vs_state = iris_bind_vs_state;
2633 ctx->bind_tcs_state = iris_bind_tcs_state;
2634 ctx->bind_tes_state = iris_bind_tes_state;
2635 ctx->bind_gs_state = iris_bind_gs_state;
2636 ctx->bind_fs_state = iris_bind_fs_state;
2637 ctx->bind_compute_state = iris_bind_cs_state;
2638 }