iris: Remove the CCS_D fallback
[mesa.git] / src / gallium / drivers / iris / iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants, we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 };
129 }
130
131 static struct brw_cs_prog_key
132 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
133 const struct iris_cs_prog_key *key)
134 {
135 return (struct brw_cs_prog_key) {
136 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
137 };
138 }
139
140 static void *
141 upload_state(struct u_upload_mgr *uploader,
142 struct iris_state_ref *ref,
143 unsigned size,
144 unsigned alignment)
145 {
146 void *p = NULL;
147 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
148 return p;
149 }
150
151 void
152 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
153 struct pipe_shader_buffer *buf,
154 struct iris_state_ref *surf_state,
155 bool ssbo)
156 {
157 struct pipe_context *ctx = &ice->ctx;
158 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
159
160 void *map =
161 upload_state(ice->state.surface_uploader, surf_state,
162 screen->isl_dev.ss.size, 64);
163 if (!unlikely(map)) {
164 surf_state->res = NULL;
165 return;
166 }
167
168 struct iris_resource *res = (void *) buf->buffer;
169 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
170 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
171
172 isl_buffer_fill_state(&screen->isl_dev, map,
173 .address = res->bo->gtt_offset + res->offset +
174 buf->buffer_offset,
175 .size_B = buf->buffer_size - res->offset,
176 .format = ssbo ? ISL_FORMAT_RAW
177 : ISL_FORMAT_R32G32B32A32_FLOAT,
178 .swizzle = ISL_SWIZZLE_IDENTITY,
179 .stride_B = 1,
180 .mocs = iris_mocs(res->bo, &screen->isl_dev));
181 }
182
183 static nir_ssa_def *
184 get_aoa_deref_offset(nir_builder *b,
185 nir_deref_instr *deref,
186 unsigned elem_size)
187 {
188 unsigned array_size = elem_size;
189 nir_ssa_def *offset = nir_imm_int(b, 0);
190
191 while (deref->deref_type != nir_deref_type_var) {
192 assert(deref->deref_type == nir_deref_type_array);
193
194 /* This level's element size is the previous level's array size */
195 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
196 assert(deref->arr.index.is_ssa);
197 offset = nir_iadd(b, offset,
198 nir_imul(b, index, nir_imm_int(b, array_size)));
199
200 deref = nir_deref_instr_parent(deref);
201 assert(glsl_type_is_array(deref->type));
202 array_size *= glsl_get_length(deref->type);
203 }
204
205 /* Accessing an invalid surface index with the dataport can result in a
206 * hang. According to the spec "if the index used to select an individual
207 * element is negative or greater than or equal to the size of the array,
208 * the results of the operation are undefined but may not lead to
209 * termination" -- which is one of the possible outcomes of the hang.
210 * Clamp the index to prevent access outside of the array bounds.
211 */
212 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
213 }
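/* A worked example from the editor (hypothetical access, not upstream
 * documentation): for img[i][j], where img is declared as an N x 4 array
 * of images and elem_size is 1, the loop computes offset = j + i * 4 and
 * array_size = N * 4, so the function returns umin(j + i * 4, N * 4 - 1):
 * a flattened element index clamped to the last valid element.
 */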
214
215 static void
216 iris_lower_storage_image_derefs(nir_shader *nir)
217 {
218 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
219
220 nir_builder b;
221 nir_builder_init(&b, impl);
222
223 nir_foreach_block(block, impl) {
224 nir_foreach_instr_safe(instr, block) {
225 if (instr->type != nir_instr_type_intrinsic)
226 continue;
227
228 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
229 switch (intrin->intrinsic) {
230 case nir_intrinsic_image_deref_load:
231 case nir_intrinsic_image_deref_store:
232 case nir_intrinsic_image_deref_atomic_add:
233 case nir_intrinsic_image_deref_atomic_imin:
234 case nir_intrinsic_image_deref_atomic_umin:
235 case nir_intrinsic_image_deref_atomic_imax:
236 case nir_intrinsic_image_deref_atomic_umax:
237 case nir_intrinsic_image_deref_atomic_and:
238 case nir_intrinsic_image_deref_atomic_or:
239 case nir_intrinsic_image_deref_atomic_xor:
240 case nir_intrinsic_image_deref_atomic_exchange:
241 case nir_intrinsic_image_deref_atomic_comp_swap:
242 case nir_intrinsic_image_deref_size:
243 case nir_intrinsic_image_deref_samples:
244 case nir_intrinsic_image_deref_load_raw_intel:
245 case nir_intrinsic_image_deref_store_raw_intel: {
246 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
247 nir_variable *var = nir_deref_instr_get_variable(deref);
248
249 b.cursor = nir_before_instr(&intrin->instr);
250 nir_ssa_def *index =
251 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
252 get_aoa_deref_offset(&b, deref, 1));
253 nir_rewrite_image_intrinsic(intrin, index, false);
254 break;
255 }
256
257 default:
258 break;
259 }
260 }
261 }
262 }
263
264 /**
265 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
266 */
267 static bool
268 iris_fix_edge_flags(nir_shader *nir)
269 {
270 if (nir->info.stage != MESA_SHADER_VERTEX) {
271 nir_shader_preserve_all_metadata(nir);
272 return false;
273 }
274
275 nir_variable *var = NULL;
276 nir_foreach_variable(v, &nir->outputs) {
277 if (v->data.location == VARYING_SLOT_EDGE) {
278 var = v;
279 break;
280 }
281 }
282
283 if (!var) {
284 nir_shader_preserve_all_metadata(nir);
285 return false;
286 }
287
288 exec_node_remove(&var->node);
289 var->data.mode = nir_var_shader_temp;
290 exec_list_push_tail(&nir->globals, &var->node);
291 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
292 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
293 nir_fixup_deref_modes(nir);
294
295 nir_foreach_function(f, nir) {
296 if (f->impl) {
297 nir_metadata_preserve(f->impl, nir_metadata_block_index |
298 nir_metadata_dominance |
299 nir_metadata_live_ssa_defs |
300 nir_metadata_loop_analysis);
301 } else {
302 nir_metadata_preserve(f->impl, nir_metadata_all);
303 }
304 }
305
306 return true;
307 }
308
309 /**
310 * Fix an uncompiled shader's stream output info.
311 *
312 * Core Gallium stores output->register_index as a "slot" number, where
313 * slots are assigned consecutively to all outputs in info->outputs_written.
314 * This naive packing of outputs doesn't work for us - we too have slots,
315 * but the layout is defined by the VUE map, which we won't have until we
316 * compile a specific shader variant. So, we remap these and simply store
317 * VARYING_SLOT_* in our copy's output->register_index fields.
318 *
319 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
320 * components of our VUE header. See brw_vue_map.c for the layout.
321 */
322 static void
323 update_so_info(struct pipe_stream_output_info *so_info,
324 uint64_t outputs_written)
325 {
326 uint8_t reverse_map[64] = {};
327 unsigned slot = 0;
328 while (outputs_written) {
329 reverse_map[slot++] = u_bit_scan64(&outputs_written);
330 }
331
332 for (unsigned i = 0; i < so_info->num_outputs; i++) {
333 struct pipe_stream_output *output = &so_info->output[i];
334
335 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
336 output->register_index = reverse_map[output->register_index];
337
338 /* The VUE header contains three scalar fields packed together:
339 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
340 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
341 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
342 */
343 switch (output->register_index) {
344 case VARYING_SLOT_LAYER:
345 assert(output->num_components == 1);
346 output->register_index = VARYING_SLOT_PSIZ;
347 output->start_component = 1;
348 break;
349 case VARYING_SLOT_VIEWPORT:
350 assert(output->num_components == 1);
351 output->register_index = VARYING_SLOT_PSIZ;
352 output->start_component = 2;
353 break;
354 case VARYING_SLOT_PSIZ:
355 assert(output->num_components == 1);
356 output->start_component = 3;
357 break;
358 }
359
360 //info->outputs_written |= 1ull << output->register_index;
361 }
362 }
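/* A worked example from the editor (hypothetical shader): if outputs_written
 * contains only VARYING_SLOT_POS, VARYING_SLOT_PSIZ, and VARYING_SLOT_VAR0,
 * Gallium numbers them as slots 0, 1, and 2, so a stream output with
 * register_index == 2 is rewritten to VARYING_SLOT_VAR0.  A gl_PointSize
 * output (slot 1) keeps VARYING_SLOT_PSIZ but gets start_component = 3,
 * matching the VUE header layout described above.
 */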
363
364 static void
365 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
366 unsigned offset, unsigned n)
367 {
368 assert(offset % sizeof(uint32_t) == 0);
369
370 for (unsigned i = 0; i < n; ++i)
371 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
372
373 for (unsigned i = n; i < 4; ++i)
374 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
375 }
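/* For instance (editor's illustration, not an upstream call site):
 * setup_vec4_image_sysval(sysvals, 2, offsetof(struct brw_image_param, size), 3)
 * fills sysvals[0..2] with BRW_PARAM_IMAGE(2, ...) entries for the three
 * size components and pads sysvals[3] with BRW_PARAM_BUILTIN_ZERO.
 */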
376
377 /**
378 * Associate NIR uniform variables with the prog_data->param[] mechanism
379 * used by the backend. Also, decide which UBOs we'd like to push in an
380 * ideal situation (though the backend can reduce this).
381 */
382 static void
383 iris_setup_uniforms(const struct brw_compiler *compiler,
384 void *mem_ctx,
385 nir_shader *nir,
386 struct brw_stage_prog_data *prog_data,
387 enum brw_param_builtin **out_system_values,
388 unsigned *out_num_system_values,
389 unsigned *out_num_cbufs)
390 {
391 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
392
393 const unsigned IRIS_MAX_SYSTEM_VALUES =
394 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
395 enum brw_param_builtin *system_values =
396 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
397 unsigned num_system_values = 0;
398
399 unsigned patch_vert_idx = -1;
400 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
401 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
402 unsigned variable_group_size_idx = -1;
403 memset(ucp_idx, -1, sizeof(ucp_idx));
404 memset(img_idx, -1, sizeof(img_idx));
405
406 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
407
408 nir_builder b;
409 nir_builder_init(&b, impl);
410
411 b.cursor = nir_before_block(nir_start_block(impl));
412 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
413 nir_ssa_def *temp_const_ubo_name = NULL;
414
415 /* Turn system value intrinsics into uniforms */
416 nir_foreach_block(block, impl) {
417 nir_foreach_instr_safe(instr, block) {
418 if (instr->type != nir_instr_type_intrinsic)
419 continue;
420
421 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
422 nir_ssa_def *offset;
423
424 switch (intrin->intrinsic) {
425 case nir_intrinsic_load_constant: {
426 /* This one is special because it reads from the shader constant
427 * data and not cbuf0 which gallium uploads for us.
428 */
429 b.cursor = nir_before_instr(instr);
430 nir_ssa_def *offset =
431 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
432 nir_intrinsic_base(intrin));
433
434 if (temp_const_ubo_name == NULL)
435 temp_const_ubo_name = nir_imm_int(&b, 0);
436
437 nir_intrinsic_instr *load_ubo =
438 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
439 load_ubo->num_components = intrin->num_components;
440 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
441 load_ubo->src[1] = nir_src_for_ssa(offset);
442 nir_intrinsic_set_align(load_ubo,
443 nir_intrinsic_align_mul(intrin),
444 nir_intrinsic_align_offset(intrin));
445 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
446 intrin->dest.ssa.num_components,
447 intrin->dest.ssa.bit_size,
448 intrin->dest.ssa.name);
449 nir_builder_instr_insert(&b, &load_ubo->instr);
450
451 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
452 nir_src_for_ssa(&load_ubo->dest.ssa));
453 nir_instr_remove(&intrin->instr);
454 continue;
455 }
456 case nir_intrinsic_load_user_clip_plane: {
457 unsigned ucp = nir_intrinsic_ucp_id(intrin);
458
459 if (ucp_idx[ucp] == -1) {
460 ucp_idx[ucp] = num_system_values;
461 num_system_values += 4;
462 }
463
464 for (int i = 0; i < 4; i++) {
465 system_values[ucp_idx[ucp] + i] =
466 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
467 }
468
469 b.cursor = nir_before_instr(instr);
470 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
471 break;
472 }
473 case nir_intrinsic_load_patch_vertices_in:
474 if (patch_vert_idx == -1)
475 patch_vert_idx = num_system_values++;
476
477 system_values[patch_vert_idx] =
478 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
479
480 b.cursor = nir_before_instr(instr);
481 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
482 break;
483 case nir_intrinsic_image_deref_load_param_intel: {
484 assert(devinfo->gen < 9);
485 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
486 nir_variable *var = nir_deref_instr_get_variable(deref);
487
488 if (img_idx[var->data.binding] == -1) {
489 /* GL only allows arrays of arrays of images. */
490 assert(glsl_type_is_image(glsl_without_array(var->type)));
491 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
492
493 for (int i = 0; i < num_images; i++) {
494 const unsigned img = var->data.binding + i;
495
496 img_idx[img] = num_system_values;
497 num_system_values += BRW_IMAGE_PARAM_SIZE;
498
499 uint32_t *img_sv = &system_values[img_idx[img]];
500
501 setup_vec4_image_sysval(
502 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
503 offsetof(struct brw_image_param, offset), 2);
504 setup_vec4_image_sysval(
505 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
506 offsetof(struct brw_image_param, size), 3);
507 setup_vec4_image_sysval(
508 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
509 offsetof(struct brw_image_param, stride), 4);
510 setup_vec4_image_sysval(
511 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
512 offsetof(struct brw_image_param, tiling), 3);
513 setup_vec4_image_sysval(
514 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
515 offsetof(struct brw_image_param, swizzling), 2);
516 }
517 }
518
519 b.cursor = nir_before_instr(instr);
520 offset = nir_iadd(&b,
521 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
522 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
523 nir_intrinsic_base(intrin) * 16));
524 break;
525 }
526 case nir_intrinsic_load_local_group_size: {
527 assert(nir->info.cs.local_size_variable);
528 if (variable_group_size_idx == -1) {
529 variable_group_size_idx = num_system_values;
530 num_system_values += 3;
531 for (int i = 0; i < 3; i++) {
532 system_values[variable_group_size_idx + i] =
533 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
534 }
535 }
536
537 b.cursor = nir_before_instr(instr);
538 offset = nir_imm_int(&b, variable_group_size_idx * sizeof(uint32_t));
539 break;
540 }
541 default:
542 continue;
543 }
544
545 unsigned comps = nir_intrinsic_dest_components(intrin);
546
547 nir_intrinsic_instr *load =
548 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
549 load->num_components = comps;
550 load->src[0] = nir_src_for_ssa(temp_ubo_name);
551 load->src[1] = nir_src_for_ssa(offset);
552 nir_intrinsic_set_align(load, 4, 0);
553 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
554 nir_builder_instr_insert(&b, &load->instr);
555 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
556 nir_src_for_ssa(&load->dest.ssa));
557 nir_instr_remove(instr);
558 }
559 }
560
561 nir_validate_shader(nir, "before remapping");
562
563 /* Uniforms are stored in constant buffer 0, so the user-facing UBOs
564 * start at index one. So if any constant buffer is needed, constant
565 * buffer 0 will be needed, so account for it.
566 */
567 unsigned num_cbufs = nir->info.num_ubos;
568 if (num_cbufs || nir->num_uniforms)
569 num_cbufs++;
570
571 /* Place the new params in a new cbuf. */
572 if (num_system_values > 0) {
573 unsigned sysval_cbuf_index = num_cbufs;
574 num_cbufs++;
575
576 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
577 num_system_values);
578
579 nir_foreach_block(block, impl) {
580 nir_foreach_instr_safe(instr, block) {
581 if (instr->type != nir_instr_type_intrinsic)
582 continue;
583
584 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
585
586 if (load->intrinsic != nir_intrinsic_load_ubo)
587 continue;
588
589 b.cursor = nir_before_instr(instr);
590
591 assert(load->src[0].is_ssa);
592
593 if (load->src[0].ssa == temp_ubo_name) {
594 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
595 nir_instr_rewrite_src(instr, &load->src[0],
596 nir_src_for_ssa(imm));
597 }
598 }
599 }
600
601 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
602 nir_opt_constant_folding(nir);
603 } else {
604 ralloc_free(system_values);
605 system_values = NULL;
606 }
607
608 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
609 nir_validate_shader(nir, "after remap");
610
611 /* We don't use params[] but gallium leaves num_uniforms set. We use this
612 * to detect when cbuf0 exists but we don't need it anymore when we get
613 * here. Instead, zero it out so that the back-end doesn't get confused
614 * when nr_params * 4 != num_uniforms.
615 */
616 nir->num_uniforms = 0;
617
618 /* Constant loads (if any) need to go at the end of the constant buffers so
619 * we need to know num_cbufs before we can lower to them.
620 */
621 if (temp_const_ubo_name != NULL) {
622 nir_load_const_instr *const_ubo_index =
623 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
624 assert(const_ubo_index->def.bit_size == 32);
625 const_ubo_index->value[0].u32 = num_cbufs;
626 }
627
628 *out_system_values = system_values;
629 *out_num_system_values = num_system_values;
630 *out_num_cbufs = num_cbufs;
631 }
632
633 static const char *surface_group_names[] = {
634 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
635 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
636 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
637 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
638 [IRIS_SURFACE_GROUP_UBO] = "ubo",
639 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
640 [IRIS_SURFACE_GROUP_IMAGE] = "image",
641 };
642
643 static void
644 iris_print_binding_table(FILE *fp, const char *name,
645 const struct iris_binding_table *bt)
646 {
647 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
648
649 uint32_t total = 0;
650 uint32_t compacted = 0;
651
652 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
653 uint32_t size = bt->sizes[i];
654 total += size;
655 if (size)
656 compacted += util_bitcount64(bt->used_mask[i]);
657 }
658
659 if (total == 0) {
660 fprintf(fp, "Binding table for %s is empty\n\n", name);
661 return;
662 }
663
664 if (total != compacted) {
665 fprintf(fp, "Binding table for %s "
666 "(compacted to %u entries from %u entries)\n",
667 name, compacted, total);
668 } else {
669 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
670 }
671
672 uint32_t entry = 0;
673 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
674 uint64_t mask = bt->used_mask[i];
675 while (mask) {
676 int index = u_bit_scan64(&mask);
677 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
678 }
679 }
680 fprintf(fp, "\n");
681 }
682
683 enum {
684 /* Max elements in a surface group. */
685 SURFACE_GROUP_MAX_ELEMENTS = 64,
686 };
687
688 /**
689 * Map a <group, index> pair to a binding table index.
690 *
691 * For example: <UBO, 5> => binding table index 12
692 */
693 uint32_t
694 iris_group_index_to_bti(const struct iris_binding_table *bt,
695 enum iris_surface_group group, uint32_t index)
696 {
697 assert(index < bt->sizes[group]);
698 uint64_t mask = bt->used_mask[group];
699 uint64_t bit = 1ull << index;
700 if (bit & mask) {
701 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
702 } else {
703 return IRIS_SURFACE_NOT_USED;
704 }
705 }
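/* A worked example from the editor (hypothetical values): suppose
 * bt->offsets[IRIS_SURFACE_GROUP_UBO] == 10 and only UBOs 0, 2, and 5 are
 * used, so bt->used_mask[IRIS_SURFACE_GROUP_UBO] == 0b100101.  Then
 *
 *    iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_UBO, 5)
 *       == 10 + util_bitcount64(0b011111 & 0b100101) == 10 + 2 == 12
 *
 * while index 1 has its bit clear and yields IRIS_SURFACE_NOT_USED.
 */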
706
707 /**
708 * Map a binding table index back to a <group, index> pair.
709 *
710 * For example: binding table index 12 => <UBO, 5>
711 */
712 uint32_t
713 iris_bti_to_group_index(const struct iris_binding_table *bt,
714 enum iris_surface_group group, uint32_t bti)
715 {
716 uint64_t used_mask = bt->used_mask[group];
717 assert(bti >= bt->offsets[group]);
718
719 uint32_t c = bti - bt->offsets[group];
720 while (used_mask) {
721 int i = u_bit_scan64(&used_mask);
722 if (c == 0)
723 return i;
724 c--;
725 }
726
727 return IRIS_SURFACE_NOT_USED;
728 }
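/* Continuing the editor's hypothetical example above:
 * iris_bti_to_group_index(bt, IRIS_SURFACE_GROUP_UBO, 12) computes
 * c = 12 - 10 = 2, walks the used bits {0, 2, 5} skipping two of them, and
 * returns group index 5 -- the inverse of iris_group_index_to_bti().
 */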
729
730 static void
731 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
732 nir_instr *instr, nir_src *src,
733 enum iris_surface_group group)
734 {
735 assert(bt->sizes[group] > 0);
736
737 b->cursor = nir_before_instr(instr);
738 nir_ssa_def *bti;
739 if (nir_src_is_const(*src)) {
740 uint32_t index = nir_src_as_uint(*src);
741 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
742 src->ssa->bit_size);
743 } else {
744 /* Indirect usage makes all the surfaces of the group available,
745 * so we can just add the base.
746 */
747 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
748 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
749 }
750 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
751 }
752
753 static void
754 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
755 enum iris_surface_group group)
756 {
757 assert(bt->sizes[group] > 0);
758
759 if (nir_src_is_const(*src)) {
760 uint64_t index = nir_src_as_uint(*src);
761 assert(index < bt->sizes[group]);
762 bt->used_mask[group] |= 1ull << index;
763 } else {
764 /* There's an indirect usage, we need all the surfaces. */
765 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
766 }
767 }
768
769 static bool
770 skip_compacting_binding_tables(void)
771 {
772 static int skip = -1;
773 if (skip < 0)
774 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
775 return skip;
776 }
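/* Usage note from the editor: running an application with
 * INTEL_DISABLE_COMPACT_BINDING_TABLE=1 makes the setup code below mark
 * every surface in every group as used, so binding tables keep their full,
 * uncompacted layout -- handy when bisecting binding-table bugs.  The
 * value is read once and cached in the static 'skip' variable.
 */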
777
778 /**
779 * Set up the binding table indices and apply to the shader.
780 */
781 static void
782 iris_setup_binding_table(const struct gen_device_info *devinfo,
783 struct nir_shader *nir,
784 struct iris_binding_table *bt,
785 unsigned num_render_targets,
786 unsigned num_system_values,
787 unsigned num_cbufs)
788 {
789 const struct shader_info *info = &nir->info;
790
791 memset(bt, 0, sizeof(*bt));
792
793 /* Set the sizes for each surface group. For some groups, we already know
794 * upfront how many will be used, so mark them.
795 */
796 if (info->stage == MESA_SHADER_FRAGMENT) {
797 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
798 /* All render targets used. */
799 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
800 BITFIELD64_MASK(num_render_targets);
801
802 /* Set up the render target read surface group in order to support
803 * non-coherent framebuffer fetch on Gen8.
804 */
805 if (devinfo->gen == 8 && info->outputs_read) {
806 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
807 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
808 BITFIELD64_MASK(num_render_targets);
809 }
810 } else if (info->stage == MESA_SHADER_COMPUTE) {
811 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
812 }
813
814 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
815 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
816
817 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
818
819 /* Allocate an extra slot in the UBO section for NIR constants.
820 * Binding table compaction will remove it if unnecessary.
821 *
822 * We don't include them in iris_compiled_shader::num_cbufs because
823 * they are uploaded separately from shs->constbuf[], but from a shader
824 * point of view, they're another UBO (at the end of the section).
825 */
826 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
827
828 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
829
830 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
831 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
832
833 /* Mark surfaces as used for the cases where we don't have the information
834 * available upfront.
835 */
836 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
837 nir_foreach_block (block, impl) {
838 nir_foreach_instr (instr, block) {
839 if (instr->type != nir_instr_type_intrinsic)
840 continue;
841
842 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
843 switch (intrin->intrinsic) {
844 case nir_intrinsic_load_num_work_groups:
845 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
846 break;
847
848 case nir_intrinsic_load_output:
849 if (devinfo->gen == 8) {
850 mark_used_with_src(bt, &intrin->src[0],
851 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
852 }
853 break;
854
855 case nir_intrinsic_image_size:
856 case nir_intrinsic_image_load:
857 case nir_intrinsic_image_store:
858 case nir_intrinsic_image_atomic_add:
859 case nir_intrinsic_image_atomic_imin:
860 case nir_intrinsic_image_atomic_umin:
861 case nir_intrinsic_image_atomic_imax:
862 case nir_intrinsic_image_atomic_umax:
863 case nir_intrinsic_image_atomic_and:
864 case nir_intrinsic_image_atomic_or:
865 case nir_intrinsic_image_atomic_xor:
866 case nir_intrinsic_image_atomic_exchange:
867 case nir_intrinsic_image_atomic_comp_swap:
868 case nir_intrinsic_image_load_raw_intel:
869 case nir_intrinsic_image_store_raw_intel:
870 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
871 break;
872
873 case nir_intrinsic_load_ubo:
874 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
875 break;
876
877 case nir_intrinsic_store_ssbo:
878 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
879 break;
880
881 case nir_intrinsic_get_buffer_size:
882 case nir_intrinsic_ssbo_atomic_add:
883 case nir_intrinsic_ssbo_atomic_imin:
884 case nir_intrinsic_ssbo_atomic_umin:
885 case nir_intrinsic_ssbo_atomic_imax:
886 case nir_intrinsic_ssbo_atomic_umax:
887 case nir_intrinsic_ssbo_atomic_and:
888 case nir_intrinsic_ssbo_atomic_or:
889 case nir_intrinsic_ssbo_atomic_xor:
890 case nir_intrinsic_ssbo_atomic_exchange:
891 case nir_intrinsic_ssbo_atomic_comp_swap:
892 case nir_intrinsic_ssbo_atomic_fmin:
893 case nir_intrinsic_ssbo_atomic_fmax:
894 case nir_intrinsic_ssbo_atomic_fcomp_swap:
895 case nir_intrinsic_load_ssbo:
896 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
897 break;
898
899 default:
900 break;
901 }
902 }
903 }
904
905 /* When disabled, we just mark everything as used. */
906 if (unlikely(skip_compacting_binding_tables())) {
907 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
908 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
909 }
910
911 /* Calculate the offsets and the binding table size based on the used
912 * surfaces. After this point, the functions to go between "group indices"
913 * and binding table indices can be used.
914 */
915 uint32_t next = 0;
916 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
917 if (bt->used_mask[i] != 0) {
918 bt->offsets[i] = next;
919 next += util_bitcount64(bt->used_mask[i]);
920 }
921 }
922 bt->size_bytes = next * 4;
923
924 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
925 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
926 }
927
928 /* Apply the binding table indices. The backend compiler is not expected
929 * to change those, as we haven't set any of the *_start entries in brw
930 * binding_table.
931 */
932 nir_builder b;
933 nir_builder_init(&b, impl);
934
935 nir_foreach_block (block, impl) {
936 nir_foreach_instr (instr, block) {
937 if (instr->type == nir_instr_type_tex) {
938 nir_tex_instr *tex = nir_instr_as_tex(instr);
939 tex->texture_index =
940 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
941 tex->texture_index);
942 continue;
943 }
944
945 if (instr->type != nir_instr_type_intrinsic)
946 continue;
947
948 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
949 switch (intrin->intrinsic) {
950 case nir_intrinsic_image_size:
951 case nir_intrinsic_image_load:
952 case nir_intrinsic_image_store:
953 case nir_intrinsic_image_atomic_add:
954 case nir_intrinsic_image_atomic_imin:
955 case nir_intrinsic_image_atomic_umin:
956 case nir_intrinsic_image_atomic_imax:
957 case nir_intrinsic_image_atomic_umax:
958 case nir_intrinsic_image_atomic_and:
959 case nir_intrinsic_image_atomic_or:
960 case nir_intrinsic_image_atomic_xor:
961 case nir_intrinsic_image_atomic_exchange:
962 case nir_intrinsic_image_atomic_comp_swap:
963 case nir_intrinsic_image_load_raw_intel:
964 case nir_intrinsic_image_store_raw_intel:
965 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
966 IRIS_SURFACE_GROUP_IMAGE);
967 break;
968
969 case nir_intrinsic_load_ubo:
970 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
971 IRIS_SURFACE_GROUP_UBO);
972 break;
973
974 case nir_intrinsic_store_ssbo:
975 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
976 IRIS_SURFACE_GROUP_SSBO);
977 break;
978
979 case nir_intrinsic_load_output:
980 if (devinfo->gen == 8) {
981 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
982 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
983 }
984 break;
985
986 case nir_intrinsic_get_buffer_size:
987 case nir_intrinsic_ssbo_atomic_add:
988 case nir_intrinsic_ssbo_atomic_imin:
989 case nir_intrinsic_ssbo_atomic_umin:
990 case nir_intrinsic_ssbo_atomic_imax:
991 case nir_intrinsic_ssbo_atomic_umax:
992 case nir_intrinsic_ssbo_atomic_and:
993 case nir_intrinsic_ssbo_atomic_or:
994 case nir_intrinsic_ssbo_atomic_xor:
995 case nir_intrinsic_ssbo_atomic_exchange:
996 case nir_intrinsic_ssbo_atomic_comp_swap:
997 case nir_intrinsic_ssbo_atomic_fmin:
998 case nir_intrinsic_ssbo_atomic_fmax:
999 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1000 case nir_intrinsic_load_ssbo:
1001 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1002 IRIS_SURFACE_GROUP_SSBO);
1003 break;
1004
1005 default:
1006 break;
1007 }
1008 }
1009 }
1010 }
1011
1012 static void
1013 iris_debug_recompile(struct iris_context *ice,
1014 struct shader_info *info,
1015 const struct brw_base_prog_key *key)
1016 {
1017 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1018 const struct gen_device_info *devinfo = &screen->devinfo;
1019 const struct brw_compiler *c = screen->compiler;
1020
1021 if (!info)
1022 return;
1023
1024 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1025 _mesa_shader_stage_to_string(info->stage),
1026 info->name ? info->name : "(no identifier)",
1027 info->label ? info->label : "");
1028
1029 const void *old_iris_key =
1030 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1031
1032 union brw_any_prog_key old_key;
1033
1034 switch (info->stage) {
1035 case MESA_SHADER_VERTEX:
1036 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1037 break;
1038 case MESA_SHADER_TESS_CTRL:
1039 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1040 break;
1041 case MESA_SHADER_TESS_EVAL:
1042 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1043 break;
1044 case MESA_SHADER_GEOMETRY:
1045 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1046 break;
1047 case MESA_SHADER_FRAGMENT:
1048 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1049 break;
1050 case MESA_SHADER_COMPUTE:
1051 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1052 break;
1053 default:
1054 unreachable("invalid shader stage");
1055 }
1056
1057 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1058 }
1059
1060 /**
1061 * Get the shader for the last enabled geometry stage.
1062 *
1063 * This stage is the one which will feed stream output and the rasterizer.
1064 */
1065 static gl_shader_stage
1066 last_vue_stage(struct iris_context *ice)
1067 {
1068 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1069 return MESA_SHADER_GEOMETRY;
1070
1071 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1072 return MESA_SHADER_TESS_EVAL;
1073
1074 return MESA_SHADER_VERTEX;
1075 }
1076
1077 /**
1078 * Compile a vertex shader, and upload the assembly.
1079 */
1080 static struct iris_compiled_shader *
1081 iris_compile_vs(struct iris_context *ice,
1082 struct iris_uncompiled_shader *ish,
1083 const struct iris_vs_prog_key *key)
1084 {
1085 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1086 const struct brw_compiler *compiler = screen->compiler;
1087 const struct gen_device_info *devinfo = &screen->devinfo;
1088 void *mem_ctx = ralloc_context(NULL);
1089 struct brw_vs_prog_data *vs_prog_data =
1090 rzalloc(mem_ctx, struct brw_vs_prog_data);
1091 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1092 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1093 enum brw_param_builtin *system_values;
1094 unsigned num_system_values;
1095 unsigned num_cbufs;
1096
1097 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1098
1099 if (key->vue.nr_userclip_plane_consts) {
1100 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1101 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1102 true, false, NULL);
1103 nir_lower_io_to_temporaries(nir, impl, true, false);
1104 nir_lower_global_vars_to_local(nir);
1105 nir_lower_vars_to_ssa(nir);
1106 nir_shader_gather_info(nir, impl);
1107 }
1108
1109 prog_data->use_alt_mode = ish->use_alt_mode;
1110
1111 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1112 &num_system_values, &num_cbufs);
1113
1114 struct iris_binding_table bt;
1115 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1116 num_system_values, num_cbufs);
1117
1118 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1119
1120 brw_compute_vue_map(devinfo,
1121 &vue_prog_data->vue_map, nir->info.outputs_written,
1122 nir->info.separate_shader, /* pos_slots */ 1);
1123
1124 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1125
1126 char *error_str = NULL;
1127 const unsigned *program =
1128 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1129 nir, -1, NULL, &error_str);
1130 if (program == NULL) {
1131 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1132 ralloc_free(mem_ctx);
1133 return false;
1134 }
1135
1136 if (ish->compiled_once) {
1137 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1138 } else {
1139 ish->compiled_once = true;
1140 }
1141
1142 uint32_t *so_decls =
1143 screen->vtbl.create_so_decl_list(&ish->stream_output,
1144 &vue_prog_data->vue_map);
1145
1146 struct iris_compiled_shader *shader =
1147 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1148 prog_data, so_decls, system_values, num_system_values,
1149 num_cbufs, &bt);
1150
1151 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1152
1153 ralloc_free(mem_ctx);
1154 return shader;
1155 }
1156
1157 /**
1158 * Update the current vertex shader variant.
1159 *
1160 * Fill out the key, look in the cache, compile and bind if needed.
1161 */
1162 static void
1163 iris_update_compiled_vs(struct iris_context *ice)
1164 {
1165 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1166 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1167 struct iris_uncompiled_shader *ish =
1168 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1169
1170 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1171 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1172
1173 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1174 struct iris_compiled_shader *shader =
1175 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1176
1177 if (!shader)
1178 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1179
1180 if (!shader)
1181 shader = iris_compile_vs(ice, ish, &key);
1182
1183 if (old != shader) {
1184 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1185 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1186 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1187 IRIS_STAGE_DIRTY_BINDINGS_VS |
1188 IRIS_STAGE_DIRTY_CONSTANTS_VS;
1189 shs->sysvals_need_upload = true;
1190
1191 const struct brw_vs_prog_data *vs_prog_data =
1192 (void *) shader->prog_data;
1193 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1194 vs_prog_data->uses_baseinstance;
1195 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1196 vs_prog_data->uses_is_indexed_draw;
1197 const bool needs_sgvs_element = uses_draw_params ||
1198 vs_prog_data->uses_instanceid ||
1199 vs_prog_data->uses_vertexid;
1200
1201 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1202 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1203 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1204 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1205 IRIS_DIRTY_VERTEX_ELEMENTS;
1206 }
1207 ice->state.vs_uses_draw_params = uses_draw_params;
1208 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1209 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1210 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1211 }
1212 }
1213
1214 /**
1215 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1216 */
1217 const struct shader_info *
1218 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1219 {
1220 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1221
1222 if (!ish)
1223 return NULL;
1224
1225 const nir_shader *nir = ish->nir;
1226 return &nir->info;
1227 }
1228
1229 /**
1230 * Get the union of TCS output and TES input slots.
1231 *
1232 * TCS and TES need to agree on a common URB entry layout. In particular,
1233 * the data for all patch vertices is stored in a single URB entry (unlike
1234 * GS which has one entry per input vertex). This means that per-vertex
1235 * array indexing needs a stride.
1236 *
1237 * SSO requires locations to match, but doesn't require the number of
1238 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1239 * So, we need to take the extra step of unifying these on the fly.
1240 */
1241 static void
1242 get_unified_tess_slots(const struct iris_context *ice,
1243 uint64_t *per_vertex_slots,
1244 uint32_t *per_patch_slots)
1245 {
1246 const struct shader_info *tcs =
1247 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1248 const struct shader_info *tes =
1249 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1250
1251 *per_vertex_slots = tes->inputs_read;
1252 *per_patch_slots = tes->patch_inputs_read;
1253
1254 if (tcs) {
1255 *per_vertex_slots |= tcs->outputs_written;
1256 *per_patch_slots |= tcs->patch_outputs_written;
1257 }
1258 }
1259
1260 /**
1261 * Compile a tessellation control shader, and upload the assembly.
1262 */
1263 static struct iris_compiled_shader *
1264 iris_compile_tcs(struct iris_context *ice,
1265 struct iris_uncompiled_shader *ish,
1266 const struct iris_tcs_prog_key *key)
1267 {
1268 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1269 const struct brw_compiler *compiler = screen->compiler;
1270 const struct nir_shader_compiler_options *options =
1271 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1272 void *mem_ctx = ralloc_context(NULL);
1273 struct brw_tcs_prog_data *tcs_prog_data =
1274 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1275 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1276 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1277 const struct gen_device_info *devinfo = &screen->devinfo;
1278 enum brw_param_builtin *system_values = NULL;
1279 unsigned num_system_values = 0;
1280 unsigned num_cbufs = 0;
1281
1282 nir_shader *nir;
1283
1284 struct iris_binding_table bt;
1285
1286 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1287
1288 if (ish) {
1289 nir = nir_shader_clone(mem_ctx, ish->nir);
1290
1291 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1292 &num_system_values, &num_cbufs);
1293 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1294 num_system_values, num_cbufs);
1295 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1296 } else {
1297 nir =
1298 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1299
1300 /* Reserve space for passing the default tess levels as constants. */
1301 num_cbufs = 1;
1302 num_system_values = 8;
1303 system_values =
1304 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1305 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1306 prog_data->nr_params = num_system_values;
1307
1308 if (key->tes_primitive_mode == GL_QUADS) {
1309 for (int i = 0; i < 4; i++)
1310 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1311
1312 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1313 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1314 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1315 for (int i = 0; i < 3; i++)
1316 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1317
1318 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1319 } else {
1320 assert(key->tes_primitive_mode == GL_ISOLINES);
1321 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1322 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1323 }
1324
1325 /* Manually setup the TCS binding table. */
1326 memset(&bt, 0, sizeof(bt));
1327 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1328 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1329 bt.size_bytes = 4;
1330
1331 prog_data->ubo_ranges[0].length = 1;
1332 }
1333
1334 char *error_str = NULL;
1335 const unsigned *program =
1336 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1337 nir, -1, NULL, &error_str);
1338 if (program == NULL) {
1339 dbg_printf("Failed to compile control shader: %s\n", error_str);
1340 ralloc_free(mem_ctx);
1341 return false;
1342 }
1343
1344 if (ish) {
1345 if (ish->compiled_once) {
1346 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1347 } else {
1348 ish->compiled_once = true;
1349 }
1350 }
1351
1352 struct iris_compiled_shader *shader =
1353 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1354 prog_data, NULL, system_values, num_system_values,
1355 num_cbufs, &bt);
1356
1357 if (ish)
1358 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1359
1360 ralloc_free(mem_ctx);
1361 return shader;
1362 }
1363
1364 /**
1365 * Update the current tessellation control shader variant.
1366 *
1367 * Fill out the key, look in the cache, compile and bind if needed.
1368 */
1369 static void
1370 iris_update_compiled_tcs(struct iris_context *ice)
1371 {
1372 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1373 struct iris_uncompiled_shader *tcs =
1374 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1375 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1376 const struct brw_compiler *compiler = screen->compiler;
1377 const struct gen_device_info *devinfo = &screen->devinfo;
1378
1379 const struct shader_info *tes_info =
1380 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1381 struct iris_tcs_prog_key key = {
1382 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1383 .tes_primitive_mode = tes_info->tess.primitive_mode,
1384 .input_vertices =
1385 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1386 .quads_workaround = devinfo->gen < 9 &&
1387 tes_info->tess.primitive_mode == GL_QUADS &&
1388 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1389 };
1390 get_unified_tess_slots(ice, &key.outputs_written,
1391 &key.patch_outputs_written);
1392 screen->vtbl.populate_tcs_key(ice, &key);
1393
1394 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1395 struct iris_compiled_shader *shader =
1396 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1397
1398 if (tcs && !shader)
1399 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1400
1401 if (!shader)
1402 shader = iris_compile_tcs(ice, tcs, &key);
1403
1404 if (old != shader) {
1405 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1406 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1407 IRIS_STAGE_DIRTY_BINDINGS_TCS |
1408 IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1409 shs->sysvals_need_upload = true;
1410 }
1411 }
1412
1413 /**
1414 * Compile a tessellation evaluation shader, and upload the assembly.
1415 */
1416 static struct iris_compiled_shader *
1417 iris_compile_tes(struct iris_context *ice,
1418 struct iris_uncompiled_shader *ish,
1419 const struct iris_tes_prog_key *key)
1420 {
1421 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1422 const struct brw_compiler *compiler = screen->compiler;
1423 void *mem_ctx = ralloc_context(NULL);
1424 struct brw_tes_prog_data *tes_prog_data =
1425 rzalloc(mem_ctx, struct brw_tes_prog_data);
1426 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1427 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1428 enum brw_param_builtin *system_values;
1429 const struct gen_device_info *devinfo = &screen->devinfo;
1430 unsigned num_system_values;
1431 unsigned num_cbufs;
1432
1433 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1434
1435 if (key->vue.nr_userclip_plane_consts) {
1436 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1437 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1438 true, false, NULL);
1439 nir_lower_io_to_temporaries(nir, impl, true, false);
1440 nir_lower_global_vars_to_local(nir);
1441 nir_lower_vars_to_ssa(nir);
1442 nir_shader_gather_info(nir, impl);
1443 }
1444
1445 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1446 &num_system_values, &num_cbufs);
1447
1448 struct iris_binding_table bt;
1449 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1450 num_system_values, num_cbufs);
1451
1452 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1453
1454 struct brw_vue_map input_vue_map;
1455 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1456 key->patch_inputs_read);
1457
1458 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1459
1460 char *error_str = NULL;
1461 const unsigned *program =
1462 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1463 tes_prog_data, nir, -1, NULL, &error_str);
1464 if (program == NULL) {
1465 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1466 ralloc_free(mem_ctx);
1467 return false;
1468 }
1469
1470 if (ish->compiled_once) {
1471 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1472 } else {
1473 ish->compiled_once = true;
1474 }
1475
1476 uint32_t *so_decls =
1477 screen->vtbl.create_so_decl_list(&ish->stream_output,
1478 &vue_prog_data->vue_map);
1479
1480
1481 struct iris_compiled_shader *shader =
1482 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1483 prog_data, so_decls, system_values, num_system_values,
1484 num_cbufs, &bt);
1485
1486 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1487
1488 ralloc_free(mem_ctx);
1489 return shader;
1490 }
1491
1492 /**
1493 * Update the current tessellation evaluation shader variant.
1494 *
1495 * Fill out the key, look in the cache, compile and bind if needed.
1496 */
1497 static void
1498 iris_update_compiled_tes(struct iris_context *ice)
1499 {
1500 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1501 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1502 struct iris_uncompiled_shader *ish =
1503 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1504
1505 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1506 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1507 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1508
1509 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1510 struct iris_compiled_shader *shader =
1511 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1512
1513 if (!shader)
1514 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1515
1516 if (!shader)
1517 shader = iris_compile_tes(ice, ish, &key);
1518
1519 if (old != shader) {
1520 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1521 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1522 IRIS_STAGE_DIRTY_BINDINGS_TES |
1523 IRIS_STAGE_DIRTY_CONSTANTS_TES;
1524 shs->sysvals_need_upload = true;
1525 }
1526
1527 /* TODO: Could compare and avoid flagging this. */
1528 const struct shader_info *tes_info = &ish->nir->info;
1529 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1530 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1531 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1532 }
1533 }
1534
1535 /**
1536 * Compile a geometry shader, and upload the assembly.
1537 */
1538 static struct iris_compiled_shader *
1539 iris_compile_gs(struct iris_context *ice,
1540 struct iris_uncompiled_shader *ish,
1541 const struct iris_gs_prog_key *key)
1542 {
1543 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1544 const struct brw_compiler *compiler = screen->compiler;
1545 const struct gen_device_info *devinfo = &screen->devinfo;
1546 void *mem_ctx = ralloc_context(NULL);
1547 struct brw_gs_prog_data *gs_prog_data =
1548 rzalloc(mem_ctx, struct brw_gs_prog_data);
1549 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1550 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1551 enum brw_param_builtin *system_values;
1552 unsigned num_system_values;
1553 unsigned num_cbufs;
1554
1555 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1556
1557 if (key->vue.nr_userclip_plane_consts) {
1558 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1559 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1560 false, NULL);
1561 nir_lower_io_to_temporaries(nir, impl, true, false);
1562 nir_lower_global_vars_to_local(nir);
1563 nir_lower_vars_to_ssa(nir);
1564 nir_shader_gather_info(nir, impl);
1565 }
1566
1567 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1568 &num_system_values, &num_cbufs);
1569
1570 struct iris_binding_table bt;
1571 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1572 num_system_values, num_cbufs);
1573
1574 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1575
1576 brw_compute_vue_map(devinfo,
1577 &vue_prog_data->vue_map, nir->info.outputs_written,
1578 nir->info.separate_shader, /* pos_slots */ 1);
1579
1580 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1581
1582 char *error_str = NULL;
1583 const unsigned *program =
1584 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1585 nir, NULL, -1, NULL, &error_str);
1586 if (program == NULL) {
1587 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1588 ralloc_free(mem_ctx);
1589 return false;
1590 }
1591
1592 if (ish->compiled_once) {
1593 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1594 } else {
1595 ish->compiled_once = true;
1596 }
1597
1598 uint32_t *so_decls =
1599 screen->vtbl.create_so_decl_list(&ish->stream_output,
1600 &vue_prog_data->vue_map);
1601
1602 struct iris_compiled_shader *shader =
1603 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1604 prog_data, so_decls, system_values, num_system_values,
1605 num_cbufs, &bt);
1606
1607 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1608
1609 ralloc_free(mem_ctx);
1610 return shader;
1611 }
1612
1613 /**
1614 * Update the current geometry shader variant.
1615 *
1616 * Fill out the key, look in the cache, compile and bind if needed.
1617 */
1618 static void
1619 iris_update_compiled_gs(struct iris_context *ice)
1620 {
1621 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1622 struct iris_uncompiled_shader *ish =
1623 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1624 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1625 struct iris_compiled_shader *shader = NULL;
1626 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1627
1628 if (ish) {
1629 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1630 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1631
1632 shader =
1633 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1634
1635 if (!shader)
1636 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1637
1638 if (!shader)
1639 shader = iris_compile_gs(ice, ish, &key);
1640 }
1641
1642 if (old != shader) {
1643 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1644 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1645 IRIS_STAGE_DIRTY_BINDINGS_GS |
1646 IRIS_STAGE_DIRTY_CONSTANTS_GS;
1647 shs->sysvals_need_upload = true;
1648 }
1649 }
1650
1651 /**
1652 * Compile a fragment (pixel) shader, and upload the assembly.
1653 */
1654 static struct iris_compiled_shader *
1655 iris_compile_fs(struct iris_context *ice,
1656 struct iris_uncompiled_shader *ish,
1657 const struct iris_fs_prog_key *key,
1658 struct brw_vue_map *vue_map)
1659 {
1660 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1661 const struct brw_compiler *compiler = screen->compiler;
1662 void *mem_ctx = ralloc_context(NULL);
1663 struct brw_wm_prog_data *fs_prog_data =
1664 rzalloc(mem_ctx, struct brw_wm_prog_data);
1665 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1666 enum brw_param_builtin *system_values;
1667 const struct gen_device_info *devinfo = &screen->devinfo;
1668 unsigned num_system_values;
1669 unsigned num_cbufs;
1670
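   /* Clone the uncompiled NIR so per-variant lowering below doesn't modify
    * the original shader shared by all variants.
    */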
1671 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1672
1673 prog_data->use_alt_mode = ish->use_alt_mode;
1674
1675 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1676 &num_system_values, &num_cbufs);
1677
1678 /* Lower output variables to load_output intrinsics before setting up
1679 * binding tables, so iris_setup_binding_table can map any load_output
1680 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1681 * non-coherent framebuffer fetches.
1682 */
1683 brw_nir_lower_fs_outputs(nir);
1684
1685 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1686 * and do not need a binding table entry with a null surface. Earlier
1687 * generations need an entry for a null surface.
1688 */
1689 int null_rts = devinfo->gen < 11 ? 1 : 0;
1690
1691 struct iris_binding_table bt;
1692 iris_setup_binding_table(devinfo, nir, &bt,
1693 MAX2(key->nr_color_regions, null_rts),
1694 num_system_values, num_cbufs);
1695
1696 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1697
1698 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1699
1700 char *error_str = NULL;
1701 const unsigned *program =
1702 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1703 nir, -1, -1, -1, /* allow_spilling */ true, /* use_rep_send */ false, vue_map,
1704 NULL, &error_str);
1705 if (program == NULL) {
1706 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1707 ralloc_free(mem_ctx);
1708 return NULL;
1709 }
1710
1711 if (ish->compiled_once) {
1712 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1713 } else {
1714 ish->compiled_once = true;
1715 }
1716
1717 struct iris_compiled_shader *shader =
1718 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1719 prog_data, NULL, system_values, num_system_values,
1720 num_cbufs, &bt);
1721
1722 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1723
1724 ralloc_free(mem_ctx);
1725 return shader;
1726 }
1727
1728 /**
1729 * Update the current fragment shader variant.
1730 *
1731 * Fill out the key, look in the cache, compile and bind if needed.
1732 */
1733 static void
1734 iris_update_compiled_fs(struct iris_context *ice)
1735 {
1736 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1737 struct iris_uncompiled_shader *ish =
1738 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1739 struct iris_fs_prog_key key = { KEY_ID(base) };
1740 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1741 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1742
1743 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1744 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1745
1746 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1747 struct iris_compiled_shader *shader =
1748 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1749
1750 if (!shader)
1751 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1752
1753 if (!shader)
1754 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1755
1756 if (old != shader) {
1757 // XXX: we only need to flag CLIP if the barycentric NONPERSPECTIVE bit
1758 // toggles; we might also be able to avoid flagging SBE.
1759 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1760 ice->state.dirty |= IRIS_DIRTY_WM |
1761 IRIS_DIRTY_CLIP |
1762 IRIS_DIRTY_SBE;
1763 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1764 IRIS_STAGE_DIRTY_BINDINGS_FS |
1765 IRIS_STAGE_DIRTY_CONSTANTS_FS;
1766 shs->sysvals_need_upload = true;
1767 }
1768 }
1769
1770 /**
1771 * Update the last enabled stage's VUE map.
1772 *
1773 * When the shader feeding the rasterizer's output interface changes, we
1774 * need to re-emit various packets.
1775 */
1776 static void
1777 update_last_vue_map(struct iris_context *ice,
1778 struct brw_stage_prog_data *prog_data)
1779 {
1780 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1781 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1782 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1783 const uint64_t changed_slots =
1784 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1785
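   /* If the VIEWPORT slot appeared or disappeared, the number of usable
    * viewports changes, so the viewport and scissor state must be re-emitted.
    */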
1786 if (changed_slots & VARYING_BIT_VIEWPORT) {
1787 ice->state.num_viewports =
1788 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1789 ice->state.dirty |= IRIS_DIRTY_CLIP |
1790 IRIS_DIRTY_SF_CL_VIEWPORT |
1791 IRIS_DIRTY_CC_VIEWPORT |
1792 IRIS_DIRTY_SCISSOR_RECT;
1793 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1794 ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1795 }
1796
1797 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1798 ice->state.dirty |= IRIS_DIRTY_SBE;
1799 }
1800
1801 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1802 }
1803
1804 static void
1805 iris_update_pull_constant_descriptors(struct iris_context *ice,
1806 gl_shader_stage stage)
1807 {
1808 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1809
1810 if (!shader || !shader->prog_data->has_ubo_pull)
1811 return;
1812
1813 struct iris_shader_state *shs = &ice->state.shaders[stage];
1814 bool any_new_descriptors =
1815 shader->num_system_values > 0 && shs->sysvals_need_upload;
1816
1817 unsigned bound_cbufs = shs->bound_cbufs;
1818
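   /* Walk the bound constant buffers and create a surface state for any that
    * don't have one yet; pull constant loads go through the binding table,
    * so new surface states mean the bindings need re-uploading.
    */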
1819 while (bound_cbufs) {
1820 const int i = u_bit_scan(&bound_cbufs);
1821 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1822 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1823 if (!surf_state->res && cbuf->buffer) {
1824 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1825 any_new_descriptors = true;
1826 }
1827 }
1828
1829 if (any_new_descriptors)
1830 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1831 }
1832
1833 /**
1834 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1835 */
1836 static struct brw_vue_prog_data *
1837 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1838 {
1839 if (!ice->shaders.prog[stage])
1840 return NULL;
1841
1842 return (void *) ice->shaders.prog[stage]->prog_data;
1843 }
1844
1845 // XXX: iris_compiled_shaders are space-leaking :(
1846 // XXX: do remember to unbind them if deleting them.
1847
1848 /**
1849 * Update the current shader variants for the given state.
1850 *
1851 * This should be called on every draw call to ensure that the correct
1852 * shaders are bound. It will also flag any dirty state triggered by
1853 * swapping out those shaders.
1854 */
1855 void
1856 iris_update_compiled_shaders(struct iris_context *ice)
1857 {
1858 const uint64_t dirty = ice->state.dirty;
1859 const uint64_t stage_dirty = ice->state.stage_dirty;
1860
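   /* Snapshot the current VUE prog data so that, after updating the shader
    * variants below, we can tell whether any URB entry sizes changed and
    * only flag IRIS_DIRTY_URB when necessary.
    */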
1861 struct brw_vue_prog_data *old_prog_datas[4];
1862 if (!(dirty & IRIS_DIRTY_URB)) {
1863 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1864 old_prog_datas[i] = get_vue_prog_data(ice, i);
1865 }
1866
1867 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1868 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1869 struct iris_uncompiled_shader *tes =
1870 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1871 if (tes) {
1872 iris_update_compiled_tcs(ice);
1873 iris_update_compiled_tes(ice);
1874 } else {
1875 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1876 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1877 ice->state.stage_dirty |=
1878 IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1879 IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1880 IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1881 }
1882 }
1883
1884 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1885 iris_update_compiled_vs(ice);
1886 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1887 iris_update_compiled_gs(ice);
1888
1889 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1890 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1891 const struct iris_compiled_shader *gs =
1892 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1893 const struct iris_compiled_shader *tes =
1894 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1895
1896 bool points_or_lines = false;
1897
1898 if (gs) {
1899 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1900 points_or_lines =
1901 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1902 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1903 } else if (tes) {
1904 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1905 points_or_lines =
1906 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1907 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1908 }
1909
1910 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1911 /* Outbound to XY Clip enables */
1912 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1913 ice->state.dirty |= IRIS_DIRTY_CLIP;
1914 }
1915 }
1916
1917 gl_shader_stage last_stage = last_vue_stage(ice);
1918 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1919 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1920 update_last_vue_map(ice, shader->prog_data);
1921 if (ice->state.streamout != shader->streamout) {
1922 ice->state.streamout = shader->streamout;
1923 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1924 }
1925
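   /* Gallium expresses stream output strides in dwords; convert them to
    * bytes for the active transform feedback targets.
    */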
1926 if (ice->state.streamout_active) {
1927 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1928 struct iris_stream_output_target *so =
1929 (void *) ice->state.so_target[i];
1930 if (so)
1931 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1932 }
1933 }
1934
1935 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
1936 iris_update_compiled_fs(ice);
1937
1938 /* Changing shader interfaces may require reconfiguring the URB. */
1939 if (!(dirty & IRIS_DIRTY_URB)) {
1940 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1941 struct brw_vue_prog_data *old = old_prog_datas[i];
1942 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1943 if (!!old != !!new ||
1944 (new && new->urb_entry_size != old->urb_entry_size)) {
1945 ice->state.dirty |= IRIS_DIRTY_URB;
1946 break;
1947 }
1948 }
1949 }
1950
1951 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1952 if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
1953 iris_update_pull_constant_descriptors(ice, i);
1954 }
1955 }
1956
1957 static struct iris_compiled_shader *
1958 iris_compile_cs(struct iris_context *ice,
1959 struct iris_uncompiled_shader *ish,
1960 const struct iris_cs_prog_key *key)
1961 {
1962 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1963 const struct brw_compiler *compiler = screen->compiler;
1964 void *mem_ctx = ralloc_context(NULL);
1965 struct brw_cs_prog_data *cs_prog_data =
1966 rzalloc(mem_ctx, struct brw_cs_prog_data);
1967 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1968 enum brw_param_builtin *system_values;
1969 const struct gen_device_info *devinfo = &screen->devinfo;
1970 unsigned num_system_values;
1971 unsigned num_cbufs;
1972
1973 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1974
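   /* Lower compute intrinsics such as the local invocation ID/index into
    * per-thread calculations (based on the subgroup ID) that the backend
    * can consume.
    */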
1975 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1976
1977 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1978 &num_system_values, &num_cbufs);
1979
1980 struct iris_binding_table bt;
1981 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1982 num_system_values, num_cbufs);
1983
1984 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1985
1986 char *error_str = NULL;
1987 const unsigned *program =
1988 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1989 nir, -1, NULL, &error_str);
1990 if (program == NULL) {
1991 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1992 ralloc_free(mem_ctx);
1993 return NULL;
1994 }
1995
1996 if (ish->compiled_once) {
1997 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1998 } else {
1999 ish->compiled_once = true;
2000 }
2001
2002 struct iris_compiled_shader *shader =
2003 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
2004 prog_data, NULL, system_values, num_system_values,
2005 num_cbufs, &bt);
2006
2007 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
2008
2009 ralloc_free(mem_ctx);
2010 return shader;
2011 }
2012
2013 static void
2014 iris_update_compiled_cs(struct iris_context *ice)
2015 {
2016 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2017 struct iris_uncompiled_shader *ish =
2018 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2019
2020 struct iris_cs_prog_key key = { KEY_ID(base) };
2021 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2022 screen->vtbl.populate_cs_key(ice, &key);
2023
2024 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2025 struct iris_compiled_shader *shader =
2026 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2027
2028 if (!shader)
2029 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2030
2031 if (!shader)
2032 shader = iris_compile_cs(ice, ish, &key);
2033
2034 if (old != shader) {
2035 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2036 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2037 IRIS_STAGE_DIRTY_BINDINGS_CS |
2038 IRIS_STAGE_DIRTY_CONSTANTS_CS;
2039 shs->sysvals_need_upload = true;
2040 }
2041 }
2042
2043 void
2044 iris_update_compiled_compute_shader(struct iris_context *ice)
2045 {
2046 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2047 iris_update_compiled_cs(ice);
2048
2049 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2050 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2051 }
2052
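/* Fill out the CS push constant buffer: one dword of per-thread data holding
 * that thread's subgroup ID, padded out to a full 8-dword register per
 * thread.  As a sketch, assuming threads == 2, dst would end up as
 * { 0, x, x, x, x, x, x, x, 1, x, x, x, x, x, x, x }, where x is padding
 * left untouched.
 */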
2053 void
2054 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2055 unsigned threads,
2056 uint32_t *dst)
2057 {
2058 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2059 assert(cs_prog_data->push.cross_thread.size == 0);
2060 assert(cs_prog_data->push.per_thread.dwords == 1);
2061 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2062 for (unsigned t = 0; t < threads; t++)
2063 dst[8 * t] = t;
2064 }
2065
2066 /**
2067 * Allocate scratch BOs as needed for the given per-thread size and stage.
2068 */
2069 struct iris_bo *
2070 iris_get_scratch_space(struct iris_context *ice,
2071 unsigned per_thread_scratch,
2072 gl_shader_stage stage)
2073 {
2074 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2075 struct iris_bufmgr *bufmgr = screen->bufmgr;
2076 const struct gen_device_info *devinfo = &screen->devinfo;
2077
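   /* per_thread_scratch is a power of two of at least 1KB, so this maps
    * 1KB -> 0, 2KB -> 1, 4KB -> 2, and so on, matching the encoding used to
    * index ice->shaders.scratch_bos.
    */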
2078 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2079 assert(encoded_size < (1 << 16));
2080
2081 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2082
2083 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2084 *
2085 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2086 * must allocate scratch space enough so that each slice has 4
2087 * slices allowed."
2088 *
2089 * According to the other driver team, this applies to compute shaders
2090 * as well. This is not currently documented at all.
2091 *
2092 * This hack is no longer necessary on Gen11+.
2093 *
2094 * For Gen11+, scratch space allocation is based on the number of threads
2095 * in the base configuration.
2096 */
2097 unsigned subslice_total = screen->subslice_total;
2098 if (devinfo->gen >= 12)
2099 subslice_total = devinfo->num_subslices[0];
2100 else if (devinfo->gen == 11)
2101 subslice_total = 8;
2102 else if (devinfo->gen < 11)
2103 subslice_total = 4 * devinfo->num_slices;
2104 assert(subslice_total >= screen->subslice_total);
2105
2106 if (!*bop) {
2107 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2108
2109 if (devinfo->gen >= 12) {
2110 /* Same as ICL below, but with 16 EUs. */
2111 scratch_ids_per_subslice = 16 * 8;
2112 } else if (devinfo->gen == 11) {
2113 /* The MEDIA_VFE_STATE docs say:
2114 *
2115 * "Starting with this configuration, the Maximum Number of
2116 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2117 *
2118 * Although there are only 7 threads per EU in the configuration,
2119 * the FFTID is calculated as if there are 8 threads per EU,
2120 * which in turn requires a larger amount of Scratch Space to be
2121 * allocated by the driver."
2122 */
2123 scratch_ids_per_subslice = 8 * 8;
2124 }
2125
2126 uint32_t max_threads[] = {
2127 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2128 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2129 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2130 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2131 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2132 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2133 };
2134
2135 uint32_t size = per_thread_scratch * max_threads[stage];
2136
2137 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2138 }
2139
2140 return *bop;
2141 }
2142
2143 /* ------------------------------------------------------------------- */
2144
2145 /**
2146 * The pipe->create_[stage]_state() driver hooks.
2147 *
2148 * Performs basic NIR preprocessing, records any state dependencies, and
2149 * returns an iris_uncompiled_shader as the Gallium CSO.
2150 *
2151 * Actual shader compilation to assembly happens later, at first use.
2152 */
2153 static void *
2154 iris_create_uncompiled_shader(struct pipe_context *ctx,
2155 nir_shader *nir,
2156 const struct pipe_stream_output_info *so_info)
2157 {
2158 struct iris_context *ice = (void *)ctx;
2159 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2160 const struct gen_device_info *devinfo = &screen->devinfo;
2161
2162 struct iris_uncompiled_shader *ish =
2163 calloc(1, sizeof(struct iris_uncompiled_shader));
2164 if (!ish)
2165 return NULL;
2166
2167 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2168
2169 brw_preprocess_nir(screen->compiler, nir, NULL);
2170
2171 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2172 &ish->uses_atomic_load_store);
2173 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2174
2175 nir_sweep(nir);
2176
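   /* If NIR moved large constant arrays out of the shader, upload that
    * constant data once and wrap it in a surface state so it can be bound
    * like an extra constant buffer.
    */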
2177 if (nir->constant_data_size > 0) {
2178 unsigned data_offset;
2179 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2180 32, nir->constant_data, &data_offset, &ish->const_data);
2181
2182 struct pipe_shader_buffer psb = {
2183 .buffer = ish->const_data,
2184 .buffer_offset = data_offset,
2185 .buffer_size = nir->constant_data_size,
2186 };
2187 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2188 }
2189
2190 ish->program_id = get_new_program_id(screen);
2191 ish->nir = nir;
2192 if (so_info) {
2193 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2194 update_so_info(&ish->stream_output, nir->info.outputs_written);
2195 }
2196
2197 /* Save this now before potentially dropping nir->info.name */
2198 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2199 ish->use_alt_mode = true;
2200
2201 if (screen->disk_cache) {
2202 /* Serialize the NIR to a binary blob that we can hash for the disk
2203 * cache. Drop unnecessary information (like variable names)
2204 * so the serialized NIR is smaller, and also to let us detect more
2205 * isomorphic shaders when hashing, increasing cache hits.
2206 */
2207 struct blob blob;
2208 blob_init(&blob);
2209 nir_serialize(&blob, nir, true);
2210 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2211 blob_finish(&blob);
2212 }
2213
2214 return ish;
2215 }
2216
2217 static struct iris_uncompiled_shader *
2218 iris_create_shader_state(struct pipe_context *ctx,
2219 const struct pipe_shader_state *state)
2220 {
2221 struct nir_shader *nir;
2222
2223 if (state->type == PIPE_SHADER_IR_TGSI)
2224 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2225 else
2226 nir = state->ir.nir;
2227
2228 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2229 }
2230
2231 static void *
2232 iris_create_vs_state(struct pipe_context *ctx,
2233 const struct pipe_shader_state *state)
2234 {
2235 struct iris_context *ice = (void *) ctx;
2236 struct iris_screen *screen = (void *) ctx->screen;
2237 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2238
2239 /* User clip planes */
2240 if (ish->nir->info.clip_distance_array_size == 0)
2241 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2242
2243 if (screen->precompile) {
2244 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2245
2246 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2247 iris_compile_vs(ice, ish, &key);
2248 }
2249
2250 return ish;
2251 }
2252
2253 static void *
2254 iris_create_tcs_state(struct pipe_context *ctx,
2255 const struct pipe_shader_state *state)
2256 {
2257 struct iris_context *ice = (void *) ctx;
2258 struct iris_screen *screen = (void *) ctx->screen;
2259 const struct brw_compiler *compiler = screen->compiler;
2260 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2261 struct shader_info *info = &ish->nir->info;
2262
2263 if (screen->precompile) {
2264 const unsigned _GL_TRIANGLES = 0x0004;
2265 struct iris_tcs_prog_key key = {
2266 KEY_ID(vue.base),
2267 // XXX: make sure the linker fills this out from the TES...
2268 .tes_primitive_mode =
2269 info->tess.primitive_mode ? info->tess.primitive_mode
2270 : _GL_TRIANGLES,
2271 .outputs_written = info->outputs_written,
2272 .patch_outputs_written = info->patch_outputs_written,
2273 };
2274
2275 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2276 * We don't have that information, so we randomly guess that the input
2277 * and output patches are the same size. This is a bad guess, but we
2278 * can't do much better.
2279 */
2280 if (compiler->use_tcs_8_patch)
2281 key.input_vertices = info->tess.tcs_vertices_out;
2282
2283 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2284 iris_compile_tcs(ice, ish, &key);
2285 }
2286
2287 return ish;
2288 }
2289
2290 static void *
2291 iris_create_tes_state(struct pipe_context *ctx,
2292 const struct pipe_shader_state *state)
2293 {
2294 struct iris_context *ice = (void *) ctx;
2295 struct iris_screen *screen = (void *) ctx->screen;
2296 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2297 struct shader_info *info = &ish->nir->info;
2298
2299 /* User clip planes */
2300 if (ish->nir->info.clip_distance_array_size == 0)
2301 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2302
2303 if (screen->precompile) {
2304 struct iris_tes_prog_key key = {
2305 KEY_ID(vue.base),
2306 // XXX: not ideal, need TCS output/TES input unification
2307 .inputs_read = info->inputs_read,
2308 .patch_inputs_read = info->patch_inputs_read,
2309 };
2310
2311 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2312 iris_compile_tes(ice, ish, &key);
2313 }
2314
2315 return ish;
2316 }
2317
2318 static void *
2319 iris_create_gs_state(struct pipe_context *ctx,
2320 const struct pipe_shader_state *state)
2321 {
2322 struct iris_context *ice = (void *) ctx;
2323 struct iris_screen *screen = (void *) ctx->screen;
2324 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2325
2326 /* User clip planes */
2327 if (ish->nir->info.clip_distance_array_size == 0)
2328 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2329
2330 if (screen->precompile) {
2331 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2332
2333 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2334 iris_compile_gs(ice, ish, &key);
2335 }
2336
2337 return ish;
2338 }
2339
2340 static void *
2341 iris_create_fs_state(struct pipe_context *ctx,
2342 const struct pipe_shader_state *state)
2343 {
2344 struct iris_context *ice = (void *) ctx;
2345 struct iris_screen *screen = (void *) ctx->screen;
2346 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2347 struct shader_info *info = &ish->nir->info;
2348
2349 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2350 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2351 (1ull << IRIS_NOS_RASTERIZER) |
2352 (1ull << IRIS_NOS_BLEND);
2353
2354 /* The program key needs the VUE map if there are > 16 inputs */
2355 if (util_bitcount64(ish->nir->info.inputs_read &
2356 BRW_FS_VARYING_INPUT_MASK) > 16) {
2357 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2358 }
2359
2360 if (screen->precompile) {
2361 const uint64_t color_outputs = info->outputs_written &
2362 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2363 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2364 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2365
2366 bool can_rearrange_varyings =
2367 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2368
2369 const struct gen_device_info *devinfo = &screen->devinfo;
2370 struct iris_fs_prog_key key = {
2371 KEY_ID(base),
2372 .nr_color_regions = util_bitcount(color_outputs),
2373 .coherent_fb_fetch = devinfo->gen >= 9,
2374 .input_slots_valid =
2375 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2376 };
2377
2378 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2379 iris_compile_fs(ice, ish, &key, NULL);
2380 }
2381
2382 return ish;
2383 }
2384
2385 static void *
2386 iris_create_compute_state(struct pipe_context *ctx,
2387 const struct pipe_compute_state *state)
2388 {
2389 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2390
2391 struct iris_context *ice = (void *) ctx;
2392 struct iris_screen *screen = (void *) ctx->screen;
2393 struct iris_uncompiled_shader *ish =
2394 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2395
2396 // XXX: disallow more than 64KB of shared variables
2397
2398 if (screen->precompile) {
2399 struct iris_cs_prog_key key = { KEY_ID(base) };
2400
2401 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2402 iris_compile_cs(ice, ish, &key);
2403 }
2404
2405 return ish;
2406 }
2407
2408 /**
2409 * The pipe->delete_[stage]_state() driver hooks.
2410 *
2411 * Frees the iris_uncompiled_shader.
2412 */
2413 static void
2414 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2415 {
2416 struct iris_uncompiled_shader *ish = state;
2417 struct iris_context *ice = (void *) ctx;
2418
2419 if (ice->shaders.uncompiled[stage] == ish) {
2420 ice->shaders.uncompiled[stage] = NULL;
2421 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2422 }
2423
2424 if (ish->const_data) {
2425 pipe_resource_reference(&ish->const_data, NULL);
2426 pipe_resource_reference(&ish->const_data_state.res, NULL);
2427 }
2428
2429 ralloc_free(ish->nir);
2430 free(ish);
2431 }
2432
2433 static void
2434 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2435 {
2436 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2437 }
2438
2439 static void
2440 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2441 {
2442 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2443 }
2444
2445 static void
2446 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2447 {
2448 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2449 }
2450
2451 static void
2452 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2453 {
2454 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2455 }
2456
2457 static void
2458 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2459 {
2460 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2461 }
2462
2463 static void
2464 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2465 {
2466 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2467 }
2468
2469 /**
2470 * The pipe->bind_[stage]_state() driver hook.
2471 *
2472 * Binds an uncompiled shader as the current one for a particular stage.
2473 * Updates dirty tracking to account for the shader's NOS.
2474 */
2475 static void
2476 bind_shader_state(struct iris_context *ice,
2477 struct iris_uncompiled_shader *ish,
2478 gl_shader_stage stage)
2479 {
2480 uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2481 const uint64_t nos = ish ? ish->nos : 0;
2482
2483 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2484 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2485
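   /* The number of SAMPLER_STATE entries we upload depends on how many
    * textures the shader uses, so flag the sampler states dirty whenever
    * that count changes.
    */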
2486 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2487 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2488 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2489 }
2490
2491 ice->shaders.uncompiled[stage] = ish;
2492 ice->state.stage_dirty |= stage_dirty_bit;
2493
2494 /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they change
2495 * (or that they no longer need to do so).
2496 */
2497 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2498 if (nos & (1 << i))
2499 ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2500 else
2501 ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2502 }
2503 }
2504
2505 static void
2506 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2507 {
2508 struct iris_context *ice = (struct iris_context *)ctx;
2509 struct iris_uncompiled_shader *new_ish = state;
2510
2511 if (new_ish &&
2512 ice->state.window_space_position !=
2513 new_ish->nir->info.vs.window_space_position) {
2514 ice->state.window_space_position =
2515 new_ish->nir->info.vs.window_space_position;
2516
2517 ice->state.dirty |= IRIS_DIRTY_CLIP |
2518 IRIS_DIRTY_RASTER |
2519 IRIS_DIRTY_CC_VIEWPORT;
2520 }
2521
2522 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2523 }
2524
2525 static void
2526 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2527 {
2528 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2529 }
2530
2531 static void
2532 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2533 {
2534 struct iris_context *ice = (struct iris_context *)ctx;
2535
2536 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2537 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2538 ice->state.dirty |= IRIS_DIRTY_URB;
2539
2540 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2541 }
2542
2543 static void
2544 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2545 {
2546 struct iris_context *ice = (struct iris_context *)ctx;
2547
2548 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2549 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2550 ice->state.dirty |= IRIS_DIRTY_URB;
2551
2552 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2553 }
2554
2555 static void
2556 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2557 {
2558 struct iris_context *ice = (struct iris_context *) ctx;
2559 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2560 const struct gen_device_info *devinfo = &screen->devinfo;
2561 struct iris_uncompiled_shader *old_ish =
2562 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2563 struct iris_uncompiled_shader *new_ish = state;
2564
2565 const unsigned color_bits =
2566 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2567 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2568
2569 /* Fragment shader outputs influence HasWriteableRT */
2570 if (!old_ish || !new_ish ||
2571 (old_ish->nir->info.outputs_written & color_bits) !=
2572 (new_ish->nir->info.outputs_written & color_bits))
2573 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2574
2575 if (devinfo->gen == 8)
2576 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2577
2578 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2579 }
2580
2581 static void
2582 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2583 {
2584 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2585 }
2586
2587 void
2588 iris_init_program_functions(struct pipe_context *ctx)
2589 {
2590 ctx->create_vs_state = iris_create_vs_state;
2591 ctx->create_tcs_state = iris_create_tcs_state;
2592 ctx->create_tes_state = iris_create_tes_state;
2593 ctx->create_gs_state = iris_create_gs_state;
2594 ctx->create_fs_state = iris_create_fs_state;
2595 ctx->create_compute_state = iris_create_compute_state;
2596
2597 ctx->delete_vs_state = iris_delete_vs_state;
2598 ctx->delete_tcs_state = iris_delete_tcs_state;
2599 ctx->delete_tes_state = iris_delete_tes_state;
2600 ctx->delete_gs_state = iris_delete_gs_state;
2601 ctx->delete_fs_state = iris_delete_fs_state;
2602 ctx->delete_compute_state = iris_delete_cs_state;
2603
2604 ctx->bind_vs_state = iris_bind_vs_state;
2605 ctx->bind_tcs_state = iris_bind_tcs_state;
2606 ctx->bind_tes_state = iris_bind_tes_state;
2607 ctx->bind_gs_state = iris_bind_gs_state;
2608 ctx->bind_fs_state = iris_bind_fs_state;
2609 ctx->bind_compute_state = iris_bind_cs_state;
2610 }