intel: fix the gen 11 compute shader scratch IDs
src/gallium/drivers/iris/iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, /* identity swizzle (XYZW) */ \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants; we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 };
129 }
130
131 static struct brw_cs_prog_key
132 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
133 const struct iris_cs_prog_key *key)
134 {
135 return (struct brw_cs_prog_key) {
136 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
137 };
138 }
139
140 static void *
141 upload_state(struct u_upload_mgr *uploader,
142 struct iris_state_ref *ref,
143 unsigned size,
144 unsigned alignment)
145 {
146 void *p = NULL;
147 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
148 return p;
149 }
150
151 void
152 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
153 struct pipe_shader_buffer *buf,
154 struct iris_state_ref *surf_state,
155 bool ssbo)
156 {
157 struct pipe_context *ctx = &ice->ctx;
158 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
159
160 void *map =
161 upload_state(ice->state.surface_uploader, surf_state,
162 screen->isl_dev.ss.size, 64);
163 if (unlikely(!map)) {
164 surf_state->res = NULL;
165 return;
166 }
167
168 struct iris_resource *res = (void *) buf->buffer;
169 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
170 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
171
172 isl_buffer_fill_state(&screen->isl_dev, map,
173 .address = res->bo->gtt_offset + res->offset +
174 buf->buffer_offset,
175 .size_B = buf->buffer_size - res->offset,
176 .format = ssbo ? ISL_FORMAT_RAW
177 : ISL_FORMAT_R32G32B32A32_FLOAT,
178 .swizzle = ISL_SWIZZLE_IDENTITY,
179 .stride_B = 1,
180 .mocs = iris_mocs(res->bo, &screen->isl_dev));
181 }
182
183 static nir_ssa_def *
184 get_aoa_deref_offset(nir_builder *b,
185 nir_deref_instr *deref,
186 unsigned elem_size)
187 {
188 unsigned array_size = elem_size;
189 nir_ssa_def *offset = nir_imm_int(b, 0);
190
191 while (deref->deref_type != nir_deref_type_var) {
192 assert(deref->deref_type == nir_deref_type_array);
193
194 /* This level's element size is the previous level's array size */
195 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
196 assert(deref->arr.index.ssa);
197 offset = nir_iadd(b, offset,
198 nir_imul(b, index, nir_imm_int(b, array_size)));
199
200 deref = nir_deref_instr_parent(deref);
201 assert(glsl_type_is_array(deref->type));
202 array_size *= glsl_get_length(deref->type);
203 }
204
205 /* Accessing an invalid surface index with the dataport can result in a
206 * hang. According to the spec "if the index used to select an individual
207 * element is negative or greater than or equal to the size of the array,
208 * the results of the operation are undefined but may not lead to
209 * termination" -- which is one of the possible outcomes of the hang.
210 * Clamp the index to prevent access outside of the array bounds.
211 */
212 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
213 }
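/*
 * A worked example of the offset computation above (illustrative only):
 * for a declaration like "uniform image2D imgs[3][2]" accessed as
 * "imgs[i][j]" with elem_size == 1, walking the deref chain gives
 *
 *    offset     = j * 1 + i * 2      (the inner dimension is contiguous)
 *    array_size = 3 * 2 = 6
 *
 * and the final clamp returns min(offset, 6 - 1), so even an out-of-range
 * i/j pair stays within the six declared image slots.
 */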
214
215 static void
216 iris_lower_storage_image_derefs(nir_shader *nir)
217 {
218 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
219
220 nir_builder b;
221 nir_builder_init(&b, impl);
222
223 nir_foreach_block(block, impl) {
224 nir_foreach_instr_safe(instr, block) {
225 if (instr->type != nir_instr_type_intrinsic)
226 continue;
227
228 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
229 switch (intrin->intrinsic) {
230 case nir_intrinsic_image_deref_load:
231 case nir_intrinsic_image_deref_store:
232 case nir_intrinsic_image_deref_atomic_add:
233 case nir_intrinsic_image_deref_atomic_imin:
234 case nir_intrinsic_image_deref_atomic_umin:
235 case nir_intrinsic_image_deref_atomic_imax:
236 case nir_intrinsic_image_deref_atomic_umax:
237 case nir_intrinsic_image_deref_atomic_and:
238 case nir_intrinsic_image_deref_atomic_or:
239 case nir_intrinsic_image_deref_atomic_xor:
240 case nir_intrinsic_image_deref_atomic_exchange:
241 case nir_intrinsic_image_deref_atomic_comp_swap:
242 case nir_intrinsic_image_deref_size:
243 case nir_intrinsic_image_deref_samples:
244 case nir_intrinsic_image_deref_load_raw_intel:
245 case nir_intrinsic_image_deref_store_raw_intel: {
246 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
247 nir_variable *var = nir_deref_instr_get_variable(deref);
248
249 b.cursor = nir_before_instr(&intrin->instr);
250 nir_ssa_def *index =
251 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
252 get_aoa_deref_offset(&b, deref, 1));
253 nir_rewrite_image_intrinsic(intrin, index, false);
254 break;
255 }
256
257 default:
258 break;
259 }
260 }
261 }
262 }
263
264 /**
265 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
266 */
267 static bool
268 iris_fix_edge_flags(nir_shader *nir)
269 {
270 if (nir->info.stage != MESA_SHADER_VERTEX)
271 return false;
272
273 nir_variable *var = NULL;
274 nir_foreach_variable(v, &nir->outputs) {
275 if (v->data.location == VARYING_SLOT_EDGE) {
276 var = v;
277 break;
278 }
279 }
280
281 if (!var)
282 return false;
283
284 exec_node_remove(&var->node);
285 var->data.mode = nir_var_shader_temp;
286 exec_list_push_tail(&nir->globals, &var->node);
287 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
288 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
289 nir_fixup_deref_modes(nir);
290
291 nir_foreach_function(f, nir) {
292 if (f->impl) {
293 nir_metadata_preserve(f->impl, nir_metadata_block_index |
294 nir_metadata_dominance |
295 nir_metadata_live_ssa_defs |
296 nir_metadata_loop_analysis);
297 }
298 }
299
300 return true;
301 }
302
303 /**
304 * Fix an uncompiled shader's stream output info.
305 *
306 * Core Gallium stores output->register_index as a "slot" number, where
307 * slots are assigned consecutively to all outputs in info->outputs_written.
308 * This naive packing of outputs doesn't work for us - we too have slots,
309 * but the layout is defined by the VUE map, which we won't have until we
310 * compile a specific shader variant. So, we remap these and simply store
311 * VARYING_SLOT_* in our copy's output->register_index fields.
312 *
313 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
314 * components of our VUE header. See brw_vue_map.c for the layout.
315 */
316 static void
317 update_so_info(struct pipe_stream_output_info *so_info,
318 uint64_t outputs_written)
319 {
320 uint8_t reverse_map[64] = {};
321 unsigned slot = 0;
322 while (outputs_written) {
323 reverse_map[slot++] = u_bit_scan64(&outputs_written);
324 }
325
326 for (unsigned i = 0; i < so_info->num_outputs; i++) {
327 struct pipe_stream_output *output = &so_info->output[i];
328
329 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
330 output->register_index = reverse_map[output->register_index];
331
332 /* The VUE header contains three scalar fields packed together:
333 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
334 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
335 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
336 */
337 switch (output->register_index) {
338 case VARYING_SLOT_LAYER:
339 assert(output->num_components == 1);
340 output->register_index = VARYING_SLOT_PSIZ;
341 output->start_component = 1;
342 break;
343 case VARYING_SLOT_VIEWPORT:
344 assert(output->num_components == 1);
345 output->register_index = VARYING_SLOT_PSIZ;
346 output->start_component = 2;
347 break;
348 case VARYING_SLOT_PSIZ:
349 assert(output->num_components == 1);
350 output->start_component = 3;
351 break;
352 }
353
354 //info->outputs_written |= 1ull << output->register_index;
355 }
356 }
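/*
 * For illustration (hypothetical shader): if a vertex shader writes only
 * VARYING_SLOT_POS, VARYING_SLOT_LAYER, and VARYING_SLOT_VAR0 (listed in
 * increasing enum order), Gallium hands us register_index 0, 1, and 2 for
 * stream outputs capturing them.  The reverse_map above turns index 1 back
 * into VARYING_SLOT_LAYER, and the LAYER case then redirects that output to
 * VARYING_SLOT_PSIZ with start_component = 1, i.e. the .y channel of the
 * VUE header.
 */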
357
358 static void
359 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
360 unsigned offset, unsigned n)
361 {
362 assert(offset % sizeof(uint32_t) == 0);
363
364 for (unsigned i = 0; i < n; ++i)
365 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
366
367 for (unsigned i = n; i < 4; ++i)
368 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
369 }
370
371 /**
372 * Associate NIR uniform variables with the prog_data->param[] mechanism
373 * used by the backend. Also, decide which UBOs we'd like to push in an
374 * ideal situation (though the backend can reduce this).
375 */
376 static void
377 iris_setup_uniforms(const struct brw_compiler *compiler,
378 void *mem_ctx,
379 nir_shader *nir,
380 struct brw_stage_prog_data *prog_data,
381 enum brw_param_builtin **out_system_values,
382 unsigned *out_num_system_values,
383 unsigned *out_num_cbufs)
384 {
385 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
386
387 const unsigned IRIS_MAX_SYSTEM_VALUES =
388 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
389 enum brw_param_builtin *system_values =
390 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
391 unsigned num_system_values = 0;
392
393 unsigned patch_vert_idx = -1;
394 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
395 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
396 memset(ucp_idx, -1, sizeof(ucp_idx));
397 memset(img_idx, -1, sizeof(img_idx));
398
399 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
400
401 nir_builder b;
402 nir_builder_init(&b, impl);
403
404 b.cursor = nir_before_block(nir_start_block(impl));
405 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
406 nir_ssa_def *temp_const_ubo_name = NULL;
407
408 /* Turn system value intrinsics into uniforms */
409 nir_foreach_block(block, impl) {
410 nir_foreach_instr_safe(instr, block) {
411 if (instr->type != nir_instr_type_intrinsic)
412 continue;
413
414 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
415 nir_ssa_def *offset;
416
417 switch (intrin->intrinsic) {
418 case nir_intrinsic_load_constant: {
419 /* This one is special because it reads from the shader constant
420 * data and not from cbuf0, which gallium uploads for us.
421 */
422 b.cursor = nir_before_instr(instr);
423 nir_ssa_def *offset =
424 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
425 nir_intrinsic_base(intrin));
426
427 if (temp_const_ubo_name == NULL)
428 temp_const_ubo_name = nir_imm_int(&b, 0);
429
430 nir_intrinsic_instr *load_ubo =
431 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
432 load_ubo->num_components = intrin->num_components;
433 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
434 load_ubo->src[1] = nir_src_for_ssa(offset);
435 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
436 intrin->dest.ssa.num_components,
437 intrin->dest.ssa.bit_size,
438 intrin->dest.ssa.name);
439 nir_builder_instr_insert(&b, &load_ubo->instr);
440
441 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
442 nir_src_for_ssa(&load_ubo->dest.ssa));
443 nir_instr_remove(&intrin->instr);
444 continue;
445 }
446 case nir_intrinsic_load_user_clip_plane: {
447 unsigned ucp = nir_intrinsic_ucp_id(intrin);
448
449 if (ucp_idx[ucp] == -1) {
450 ucp_idx[ucp] = num_system_values;
451 num_system_values += 4;
452 }
453
454 for (int i = 0; i < 4; i++) {
455 system_values[ucp_idx[ucp] + i] =
456 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
457 }
458
459 b.cursor = nir_before_instr(instr);
460 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
461 break;
462 }
463 case nir_intrinsic_load_patch_vertices_in:
464 if (patch_vert_idx == -1)
465 patch_vert_idx = num_system_values++;
466
467 system_values[patch_vert_idx] =
468 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
469
470 b.cursor = nir_before_instr(instr);
471 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
472 break;
473 case nir_intrinsic_image_deref_load_param_intel: {
474 assert(devinfo->gen < 9);
475 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
476 nir_variable *var = nir_deref_instr_get_variable(deref);
477
478 if (img_idx[var->data.binding] == -1) {
479 /* GL only allows arrays of arrays of images. */
480 assert(glsl_type_is_image(glsl_without_array(var->type)));
481 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
482
483 for (int i = 0; i < num_images; i++) {
484 const unsigned img = var->data.binding + i;
485
486 img_idx[img] = num_system_values;
487 num_system_values += BRW_IMAGE_PARAM_SIZE;
488
489 uint32_t *img_sv = &system_values[img_idx[img]];
490
491 setup_vec4_image_sysval(
492 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
493 offsetof(struct brw_image_param, offset), 2);
494 setup_vec4_image_sysval(
495 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
496 offsetof(struct brw_image_param, size), 3);
497 setup_vec4_image_sysval(
498 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
499 offsetof(struct brw_image_param, stride), 4);
500 setup_vec4_image_sysval(
501 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
502 offsetof(struct brw_image_param, tiling), 3);
503 setup_vec4_image_sysval(
504 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
505 offsetof(struct brw_image_param, swizzling), 2);
506 }
507 }
508
509 b.cursor = nir_before_instr(instr);
510 offset = nir_iadd(&b,
511 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
512 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
513 nir_intrinsic_base(intrin) * 16));
514 break;
515 }
516 default:
517 continue;
518 }
519
520 unsigned comps = nir_intrinsic_dest_components(intrin);
521
522 nir_intrinsic_instr *load =
523 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
524 load->num_components = comps;
525 load->src[0] = nir_src_for_ssa(temp_ubo_name);
526 load->src[1] = nir_src_for_ssa(offset);
527 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
528 nir_builder_instr_insert(&b, &load->instr);
529 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
530 nir_src_for_ssa(&load->dest.ssa));
531 nir_instr_remove(instr);
532 }
533 }
534
535 nir_validate_shader(nir, "before remapping");
536
537 /* Uniforms are stored in constant buffer 0, and user-facing UBOs are
538 * indexed starting at one. So if any constant buffer is needed,
539 * constant buffer 0 will be needed too - account for it.
540 */
541 unsigned num_cbufs = nir->info.num_ubos;
542 if (num_cbufs || nir->num_uniforms)
543 num_cbufs++;
544
545 /* Place the new params in a new cbuf. */
546 if (num_system_values > 0) {
547 unsigned sysval_cbuf_index = num_cbufs;
548 num_cbufs++;
549
550 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
551 num_system_values);
552
553 nir_foreach_block(block, impl) {
554 nir_foreach_instr_safe(instr, block) {
555 if (instr->type != nir_instr_type_intrinsic)
556 continue;
557
558 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
559
560 if (load->intrinsic != nir_intrinsic_load_ubo)
561 continue;
562
563 b.cursor = nir_before_instr(instr);
564
565 assert(load->src[0].is_ssa);
566
567 if (load->src[0].ssa == temp_ubo_name) {
568 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
569 nir_instr_rewrite_src(instr, &load->src[0],
570 nir_src_for_ssa(imm));
571 }
572 }
573 }
574
575 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
576 nir_opt_constant_folding(nir);
577 } else {
578 ralloc_free(system_values);
579 system_values = NULL;
580 }
581
582 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
583 nir_validate_shader(nir, "after remap");
584
585 /* We don't use params[], but gallium leaves num_uniforms set. We use it
586 * to detect whether cbuf0 exists, but we don't need it anymore by the
587 * time we get here. Zero it out so that the back-end doesn't get
588 * confused when num_uniforms != nr_params * 4.
589 */
590 nir->num_uniforms = 0;
591
592 /* Constant loads (if any) need to go at the end of the constant buffers,
593 * so we need to know num_cbufs before we can point the loads at them.
594 */
595 if (temp_const_ubo_name != NULL) {
596 nir_load_const_instr *const_ubo_index =
597 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
598 assert(const_ubo_index->def.bit_size == 32);
599 const_ubo_index->value[0].u32 = num_cbufs;
600 }
601
602 *out_system_values = system_values;
603 *out_num_system_values = num_system_values;
604 *out_num_cbufs = num_cbufs;
605 }
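/*
 * For illustration, the constant buffer layout this produces for a shader
 * with two user UBOs, default-block uniforms, system values, and
 * load_constant data would be (hypothetical indices, not a fixed ABI):
 *
 *    cbuf 0: gallium-uploaded uniforms (the default uniform block)
 *    cbuf 1: user UBO binding 0
 *    cbuf 2: user UBO binding 1
 *    cbuf 3: the system values gathered above
 *    cbuf 4: NIR constant data, via the extra UBO slot reserved in
 *            iris_setup_binding_table and patched in just above
 */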
606
607 static const char *surface_group_names[] = {
608 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
609 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
610 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
611 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
612 [IRIS_SURFACE_GROUP_UBO] = "ubo",
613 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
614 [IRIS_SURFACE_GROUP_IMAGE] = "image",
615 };
616
617 static void
618 iris_print_binding_table(FILE *fp, const char *name,
619 const struct iris_binding_table *bt)
620 {
621 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
622
623 uint32_t total = 0;
624 uint32_t compacted = 0;
625
626 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
627 uint32_t size = bt->sizes[i];
628 total += size;
629 if (size)
630 compacted += util_bitcount64(bt->used_mask[i]);
631 }
632
633 if (total == 0) {
634 fprintf(fp, "Binding table for %s is empty\n\n", name);
635 return;
636 }
637
638 if (total != compacted) {
639 fprintf(fp, "Binding table for %s "
640 "(compacted to %u entries from %u entries)\n",
641 name, compacted, total);
642 } else {
643 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
644 }
645
646 uint32_t entry = 0;
647 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
648 uint64_t mask = bt->used_mask[i];
649 while (mask) {
650 int index = u_bit_scan64(&mask);
651 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
652 }
653 }
654 fprintf(fp, "\n");
655 }
656
657 enum {
658 /* Max elements in a surface group. */
659 SURFACE_GROUP_MAX_ELEMENTS = 64,
660 };
661
662 /**
663 * Map a <group, index> pair to a binding table index.
664 *
665 * For example: <UBO, 5> => binding table index 12
666 */
667 uint32_t
668 iris_group_index_to_bti(const struct iris_binding_table *bt,
669 enum iris_surface_group group, uint32_t index)
670 {
671 assert(index < bt->sizes[group]);
672 uint64_t mask = bt->used_mask[group];
673 uint64_t bit = 1ull << index;
674 if (bit & mask) {
675 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
676 } else {
677 return IRIS_SURFACE_NOT_USED;
678 }
679 }
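/*
 * A small worked example with hypothetical values: with
 *
 *    bt->offsets[IRIS_SURFACE_GROUP_UBO]   = 7
 *    bt->used_mask[IRIS_SURFACE_GROUP_UBO] = 0b101101
 *
 * asking for <UBO, 5> computes util_bitcount64(0b011111 & 0b101101) == 3,
 * so the binding table index is 7 + 3 = 10.  Asking for <UBO, 1>, whose
 * bit is not set in the mask, returns IRIS_SURFACE_NOT_USED.
 */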
680
681 /**
682 * Map a binding table index back to a <group, index> pair.
683 *
684 * For example: binding table index 12 => <UBO, 5>
685 */
686 uint32_t
687 iris_bti_to_group_index(const struct iris_binding_table *bt,
688 enum iris_surface_group group, uint32_t bti)
689 {
690 uint64_t used_mask = bt->used_mask[group];
691 assert(bti >= bt->offsets[group]);
692
693 uint32_t c = bti - bt->offsets[group];
694 while (used_mask) {
695 int i = u_bit_scan64(&used_mask);
696 if (c == 0)
697 return i;
698 c--;
699 }
700
701 return IRIS_SURFACE_NOT_USED;
702 }
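/*
 * Continuing the hypothetical example above: with offsets[UBO] = 7 and
 * used_mask = 0b101101, a bti of 10 gives c = 3, and the loop returns the
 * fourth set bit of the mask, i.e. group index 5 -- the inverse of
 * iris_group_index_to_bti.
 */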
703
704 static void
705 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
706 nir_instr *instr, nir_src *src,
707 enum iris_surface_group group)
708 {
709 assert(bt->sizes[group] > 0);
710
711 b->cursor = nir_before_instr(instr);
712 nir_ssa_def *bti;
713 if (nir_src_is_const(*src)) {
714 uint32_t index = nir_src_as_uint(*src);
715 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
716 src->ssa->bit_size);
717 } else {
718 /* Indirect usage makes all the surfaces of the group available,
719 * so we can just add the base.
720 */
721 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
722 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
723 }
724 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
725 }
726
727 static void
728 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
729 enum iris_surface_group group)
730 {
731 assert(bt->sizes[group] > 0);
732
733 if (nir_src_is_const(*src)) {
734 uint64_t index = nir_src_as_uint(*src);
735 assert(index < bt->sizes[group]);
736 bt->used_mask[group] |= 1ull << index;
737 } else {
738 /* There's an indirect usage; we need all the surfaces. */
739 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
740 }
741 }
742
743 static bool
744 skip_compacting_binding_tables(void)
745 {
746 static int skip = -1;
747 if (skip < 0)
748 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
749 return skip;
750 }
751
752 /**
753 * Set up the binding table indices and apply to the shader.
754 */
755 static void
756 iris_setup_binding_table(const struct gen_device_info *devinfo,
757 struct nir_shader *nir,
758 struct iris_binding_table *bt,
759 unsigned num_render_targets,
760 unsigned num_system_values,
761 unsigned num_cbufs)
762 {
763 const struct shader_info *info = &nir->info;
764
765 memset(bt, 0, sizeof(*bt));
766
767 /* Set the sizes for each surface group. For some groups, we already know
768 * upfront how many will be used, so mark them.
769 */
770 if (info->stage == MESA_SHADER_FRAGMENT) {
771 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
772 /* All render targets used. */
773 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
774 BITFIELD64_MASK(num_render_targets);
775
776 /* Set up the render target read surface group in order to support
777 * non-coherent framebuffer fetch on Gen8.
778 */
779 if (devinfo->gen == 8 && info->outputs_read) {
780 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
781 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
782 BITFIELD64_MASK(num_render_targets);
783 }
784 } else if (info->stage == MESA_SHADER_COMPUTE) {
785 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
786 }
787
788 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
789 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
790
791 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
792
793 /* Allocate an extra slot in the UBO section for NIR constants.
794 * Binding table compaction will remove it if unnecessary.
795 *
796 * We don't include them in iris_compiled_shader::num_cbufs because
797 * they are uploaded separately from shs->constbuf[], but from a shader
798 * point of view, they're another UBO (at the end of the section).
799 */
800 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
801
802 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
803
804 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
805 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
806
807 /* Mark surfaces used for the cases where we don't have the information
808 * available upfront.
809 */
810 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
811 nir_foreach_block (block, impl) {
812 nir_foreach_instr (instr, block) {
813 if (instr->type != nir_instr_type_intrinsic)
814 continue;
815
816 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
817 switch (intrin->intrinsic) {
818 case nir_intrinsic_load_num_work_groups:
819 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
820 break;
821
822 case nir_intrinsic_load_output:
823 if (devinfo->gen == 8) {
824 mark_used_with_src(bt, &intrin->src[0],
825 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
826 }
827 break;
828
829 case nir_intrinsic_image_size:
830 case nir_intrinsic_image_load:
831 case nir_intrinsic_image_store:
832 case nir_intrinsic_image_atomic_add:
833 case nir_intrinsic_image_atomic_imin:
834 case nir_intrinsic_image_atomic_umin:
835 case nir_intrinsic_image_atomic_imax:
836 case nir_intrinsic_image_atomic_umax:
837 case nir_intrinsic_image_atomic_and:
838 case nir_intrinsic_image_atomic_or:
839 case nir_intrinsic_image_atomic_xor:
840 case nir_intrinsic_image_atomic_exchange:
841 case nir_intrinsic_image_atomic_comp_swap:
842 case nir_intrinsic_image_load_raw_intel:
843 case nir_intrinsic_image_store_raw_intel:
844 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
845 break;
846
847 case nir_intrinsic_load_ubo:
848 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
849 break;
850
851 case nir_intrinsic_store_ssbo:
852 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
853 break;
854
855 case nir_intrinsic_get_buffer_size:
856 case nir_intrinsic_ssbo_atomic_add:
857 case nir_intrinsic_ssbo_atomic_imin:
858 case nir_intrinsic_ssbo_atomic_umin:
859 case nir_intrinsic_ssbo_atomic_imax:
860 case nir_intrinsic_ssbo_atomic_umax:
861 case nir_intrinsic_ssbo_atomic_and:
862 case nir_intrinsic_ssbo_atomic_or:
863 case nir_intrinsic_ssbo_atomic_xor:
864 case nir_intrinsic_ssbo_atomic_exchange:
865 case nir_intrinsic_ssbo_atomic_comp_swap:
866 case nir_intrinsic_ssbo_atomic_fmin:
867 case nir_intrinsic_ssbo_atomic_fmax:
868 case nir_intrinsic_ssbo_atomic_fcomp_swap:
869 case nir_intrinsic_load_ssbo:
870 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
871 break;
872
873 default:
874 break;
875 }
876 }
877 }
878
879 /* When disabled, we just mark everything as used. */
880 if (unlikely(skip_compacting_binding_tables())) {
881 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
882 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
883 }
884
885 /* Calculate the offsets and the binding table size based on the used
886 * surfaces. After this point, the functions to go between "group indices"
887 * and binding table indices can be used.
888 */
889 uint32_t next = 0;
890 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
891 if (bt->used_mask[i] != 0) {
892 bt->offsets[i] = next;
893 next += util_bitcount64(bt->used_mask[i]);
894 }
895 }
896 bt->size_bytes = next * 4;
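   /*
    * For example, with hypothetical masks: if the texture group has
    * used_mask = 0b0101, the UBO group has used_mask = 0b11, and every other
    * group is empty, the texture group gets offset 0 and contributes 2
    * entries, the UBO group gets offset 2 and contributes 2 more, and
    * size_bytes ends up as 4 * 4 = 16.  Groups with an empty mask never
    * receive an offset and take no space.
    */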
897
898 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
899 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
900 }
901
902 /* Apply the binding table indices. The backend compiler is not expected
903 * to change those, as we haven't set any of the *_start entries in brw
904 * binding_table.
905 */
906 nir_builder b;
907 nir_builder_init(&b, impl);
908
909 nir_foreach_block (block, impl) {
910 nir_foreach_instr (instr, block) {
911 if (instr->type == nir_instr_type_tex) {
912 nir_tex_instr *tex = nir_instr_as_tex(instr);
913 tex->texture_index =
914 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
915 tex->texture_index);
916 continue;
917 }
918
919 if (instr->type != nir_instr_type_intrinsic)
920 continue;
921
922 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
923 switch (intrin->intrinsic) {
924 case nir_intrinsic_image_size:
925 case nir_intrinsic_image_load:
926 case nir_intrinsic_image_store:
927 case nir_intrinsic_image_atomic_add:
928 case nir_intrinsic_image_atomic_imin:
929 case nir_intrinsic_image_atomic_umin:
930 case nir_intrinsic_image_atomic_imax:
931 case nir_intrinsic_image_atomic_umax:
932 case nir_intrinsic_image_atomic_and:
933 case nir_intrinsic_image_atomic_or:
934 case nir_intrinsic_image_atomic_xor:
935 case nir_intrinsic_image_atomic_exchange:
936 case nir_intrinsic_image_atomic_comp_swap:
937 case nir_intrinsic_image_load_raw_intel:
938 case nir_intrinsic_image_store_raw_intel:
939 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
940 IRIS_SURFACE_GROUP_IMAGE);
941 break;
942
943 case nir_intrinsic_load_ubo:
944 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
945 IRIS_SURFACE_GROUP_UBO);
946 break;
947
948 case nir_intrinsic_store_ssbo:
949 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
950 IRIS_SURFACE_GROUP_SSBO);
951 break;
952
953 case nir_intrinsic_load_output:
954 if (devinfo->gen == 8) {
955 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
956 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
957 }
958 break;
959
960 case nir_intrinsic_get_buffer_size:
961 case nir_intrinsic_ssbo_atomic_add:
962 case nir_intrinsic_ssbo_atomic_imin:
963 case nir_intrinsic_ssbo_atomic_umin:
964 case nir_intrinsic_ssbo_atomic_imax:
965 case nir_intrinsic_ssbo_atomic_umax:
966 case nir_intrinsic_ssbo_atomic_and:
967 case nir_intrinsic_ssbo_atomic_or:
968 case nir_intrinsic_ssbo_atomic_xor:
969 case nir_intrinsic_ssbo_atomic_exchange:
970 case nir_intrinsic_ssbo_atomic_comp_swap:
971 case nir_intrinsic_ssbo_atomic_fmin:
972 case nir_intrinsic_ssbo_atomic_fmax:
973 case nir_intrinsic_ssbo_atomic_fcomp_swap:
974 case nir_intrinsic_load_ssbo:
975 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
976 IRIS_SURFACE_GROUP_SSBO);
977 break;
978
979 default:
980 break;
981 }
982 }
983 }
984 }
985
986 static void
987 iris_debug_recompile(struct iris_context *ice,
988 struct shader_info *info,
989 const struct brw_base_prog_key *key)
990 {
991 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
992 const struct gen_device_info *devinfo = &screen->devinfo;
993 const struct brw_compiler *c = screen->compiler;
994
995 if (!info)
996 return;
997
998 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
999 _mesa_shader_stage_to_string(info->stage),
1000 info->name ? info->name : "(no identifier)",
1001 info->label ? info->label : "");
1002
1003 const void *old_iris_key =
1004 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1005
1006 union brw_any_prog_key old_key;
1007
1008 switch (info->stage) {
1009 case MESA_SHADER_VERTEX:
1010 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1011 break;
1012 case MESA_SHADER_TESS_CTRL:
1013 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1014 break;
1015 case MESA_SHADER_TESS_EVAL:
1016 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1017 break;
1018 case MESA_SHADER_GEOMETRY:
1019 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1020 break;
1021 case MESA_SHADER_FRAGMENT:
1022 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1023 break;
1024 case MESA_SHADER_COMPUTE:
1025 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1026 break;
1027 default:
1028 unreachable("invalid shader stage");
1029 }
1030
1031 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1032 }
1033
1034 /**
1035 * Get the shader for the last enabled geometry stage.
1036 *
1037 * This stage is the one which will feed stream output and the rasterizer.
1038 */
1039 static gl_shader_stage
1040 last_vue_stage(struct iris_context *ice)
1041 {
1042 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1043 return MESA_SHADER_GEOMETRY;
1044
1045 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1046 return MESA_SHADER_TESS_EVAL;
1047
1048 return MESA_SHADER_VERTEX;
1049 }
1050
1051 /**
1052 * Compile a vertex shader, and upload the assembly.
1053 */
1054 static struct iris_compiled_shader *
1055 iris_compile_vs(struct iris_context *ice,
1056 struct iris_uncompiled_shader *ish,
1057 const struct iris_vs_prog_key *key)
1058 {
1059 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1060 const struct brw_compiler *compiler = screen->compiler;
1061 const struct gen_device_info *devinfo = &screen->devinfo;
1062 void *mem_ctx = ralloc_context(NULL);
1063 struct brw_vs_prog_data *vs_prog_data =
1064 rzalloc(mem_ctx, struct brw_vs_prog_data);
1065 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1066 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1067 enum brw_param_builtin *system_values;
1068 unsigned num_system_values;
1069 unsigned num_cbufs;
1070
1071 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1072
1073 if (key->vue.nr_userclip_plane_consts) {
1074 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1075 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1076 true, false, NULL);
1077 nir_lower_io_to_temporaries(nir, impl, true, false);
1078 nir_lower_global_vars_to_local(nir);
1079 nir_lower_vars_to_ssa(nir);
1080 nir_shader_gather_info(nir, impl);
1081 }
1082
1083 prog_data->use_alt_mode = ish->use_alt_mode;
1084
1085 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1086 &num_system_values, &num_cbufs);
1087
1088 struct iris_binding_table bt;
1089 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1090 num_system_values, num_cbufs);
1091
1092 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1093
1094 brw_compute_vue_map(devinfo,
1095 &vue_prog_data->vue_map, nir->info.outputs_written,
1096 nir->info.separate_shader);
1097
1098 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1099
1100 char *error_str = NULL;
1101 const unsigned *program =
1102 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1103 nir, -1, NULL, &error_str);
1104 if (program == NULL) {
1105 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1106 ralloc_free(mem_ctx);
1107 return NULL;
1108 }
1109
1110 if (ish->compiled_once) {
1111 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1112 } else {
1113 ish->compiled_once = true;
1114 }
1115
1116 uint32_t *so_decls =
1117 ice->vtbl.create_so_decl_list(&ish->stream_output,
1118 &vue_prog_data->vue_map);
1119
1120 struct iris_compiled_shader *shader =
1121 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1122 prog_data, so_decls, system_values, num_system_values,
1123 num_cbufs, &bt);
1124
1125 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1126
1127 ralloc_free(mem_ctx);
1128 return shader;
1129 }
1130
1131 /**
1132 * Update the current vertex shader variant.
1133 *
1134 * Fill out the key, look in the cache, compile and bind if needed.
1135 */
1136 static void
1137 iris_update_compiled_vs(struct iris_context *ice)
1138 {
1139 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1140 struct iris_uncompiled_shader *ish =
1141 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1142
1143 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1144 ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1145
1146 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1147 struct iris_compiled_shader *shader =
1148 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1149
1150 if (!shader)
1151 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1152
1153 if (!shader)
1154 shader = iris_compile_vs(ice, ish, &key);
1155
1156 if (old != shader) {
1157 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1158 ice->state.dirty |= IRIS_DIRTY_VS |
1159 IRIS_DIRTY_BINDINGS_VS |
1160 IRIS_DIRTY_CONSTANTS_VS |
1161 IRIS_DIRTY_VF_SGVS;
1162 shs->sysvals_need_upload = true;
1163
1164 const struct brw_vs_prog_data *vs_prog_data =
1165 (void *) shader->prog_data;
1166 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1167 vs_prog_data->uses_baseinstance;
1168 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1169 vs_prog_data->uses_is_indexed_draw;
1170 const bool needs_sgvs_element = uses_draw_params ||
1171 vs_prog_data->uses_instanceid ||
1172 vs_prog_data->uses_vertexid;
1173
1174 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1175 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1176 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1177 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1178 IRIS_DIRTY_VERTEX_ELEMENTS;
1179 }
1180 ice->state.vs_uses_draw_params = uses_draw_params;
1181 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1182 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1183 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1184 }
1185 }
1186
1187 /**
1188 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1189 */
1190 const struct shader_info *
1191 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1192 {
1193 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1194
1195 if (!ish)
1196 return NULL;
1197
1198 const nir_shader *nir = ish->nir;
1199 return &nir->info;
1200 }
1201
1202 /**
1203 * Get the union of TCS output and TES input slots.
1204 *
1205 * TCS and TES need to agree on a common URB entry layout. In particular,
1206 * the data for all patch vertices is stored in a single URB entry (unlike
1207 * GS which has one entry per input vertex). This means that per-vertex
1208 * array indexing needs a stride.
1209 *
1210 * SSO requires locations to match, but doesn't require the number of
1211 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1212 * So, we need to take the extra step of unifying these on the fly.
1213 */
1214 static void
1215 get_unified_tess_slots(const struct iris_context *ice,
1216 uint64_t *per_vertex_slots,
1217 uint32_t *per_patch_slots)
1218 {
1219 const struct shader_info *tcs =
1220 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1221 const struct shader_info *tes =
1222 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1223
1224 *per_vertex_slots = tes->inputs_read;
1225 *per_patch_slots = tes->patch_inputs_read;
1226
1227 if (tcs) {
1228 *per_vertex_slots |= tcs->outputs_written;
1229 *per_patch_slots |= tcs->patch_outputs_written;
1230 }
1231 }
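/*
 * For example (hypothetical shaders): if the TCS writes per-vertex outputs
 * {POS, VAR0, VAR1} but the TES only reads {POS, VAR0}, the unified
 * per-vertex slot set is the union {POS, VAR0, VAR1}.  Both stages then
 * agree on the same URB entry layout and per-vertex array stride, even
 * though VAR1 is never read by the TES.
 */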
1232
1233 /**
1234 * Compile a tessellation control shader, and upload the assembly.
1235 */
1236 static struct iris_compiled_shader *
1237 iris_compile_tcs(struct iris_context *ice,
1238 struct iris_uncompiled_shader *ish,
1239 const struct iris_tcs_prog_key *key)
1240 {
1241 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1242 const struct brw_compiler *compiler = screen->compiler;
1243 const struct nir_shader_compiler_options *options =
1244 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1245 void *mem_ctx = ralloc_context(NULL);
1246 struct brw_tcs_prog_data *tcs_prog_data =
1247 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1248 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1249 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1250 const struct gen_device_info *devinfo = &screen->devinfo;
1251 enum brw_param_builtin *system_values = NULL;
1252 unsigned num_system_values = 0;
1253 unsigned num_cbufs = 0;
1254
1255 nir_shader *nir;
1256
1257 struct iris_binding_table bt;
1258
1259 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1260
1261 if (ish) {
1262 nir = nir_shader_clone(mem_ctx, ish->nir);
1263
1264 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1265 &num_system_values, &num_cbufs);
1266 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1267 num_system_values, num_cbufs);
1268 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1269 } else {
1270 nir =
1271 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1272
1273 /* Reserve space for passing the default tess levels as constants. */
1274 num_cbufs = 1;
1275 num_system_values = 8;
1276 system_values =
1277 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1278 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1279 prog_data->nr_params = num_system_values;
1280
1281 if (key->tes_primitive_mode == GL_QUADS) {
1282 for (int i = 0; i < 4; i++)
1283 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1284
1285 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1286 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1287 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1288 for (int i = 0; i < 3; i++)
1289 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1290
1291 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1292 } else {
1293 assert(key->tes_primitive_mode == GL_ISOLINES);
1294 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1295 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1296 }
1297
1298 /* Manually set up the TCS binding table. */
1299 memset(&bt, 0, sizeof(bt));
1300 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1301 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1302 bt.size_bytes = 4;
1303
1304 prog_data->ubo_ranges[0].length = 1;
1305 }
1306
1307 char *error_str = NULL;
1308 const unsigned *program =
1309 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1310 nir, -1, NULL, &error_str);
1311 if (program == NULL) {
1312 dbg_printf("Failed to compile tessellation control shader: %s\n", error_str);
1313 ralloc_free(mem_ctx);
1314 return NULL;
1315 }
1316
1317 if (ish) {
1318 if (ish->compiled_once) {
1319 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1320 } else {
1321 ish->compiled_once = true;
1322 }
1323 }
1324
1325 struct iris_compiled_shader *shader =
1326 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1327 prog_data, NULL, system_values, num_system_values,
1328 num_cbufs, &bt);
1329
1330 if (ish)
1331 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1332
1333 ralloc_free(mem_ctx);
1334 return shader;
1335 }
1336
1337 /**
1338 * Update the current tessellation control shader variant.
1339 *
1340 * Fill out the key, look in the cache, compile and bind if needed.
1341 */
1342 static void
1343 iris_update_compiled_tcs(struct iris_context *ice)
1344 {
1345 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1346 struct iris_uncompiled_shader *tcs =
1347 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1348 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1349 const struct brw_compiler *compiler = screen->compiler;
1350 const struct gen_device_info *devinfo = &screen->devinfo;
1351
1352 const struct shader_info *tes_info =
1353 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1354 struct iris_tcs_prog_key key = {
1355 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1356 .tes_primitive_mode = tes_info->tess.primitive_mode,
1357 .input_vertices =
1358 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1359 .quads_workaround = devinfo->gen < 9 &&
1360 tes_info->tess.primitive_mode == GL_QUADS &&
1361 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1362 };
1363 get_unified_tess_slots(ice, &key.outputs_written,
1364 &key.patch_outputs_written);
1365 ice->vtbl.populate_tcs_key(ice, &key);
1366
1367 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1368 struct iris_compiled_shader *shader =
1369 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1370
1371 if (tcs && !shader)
1372 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1373
1374 if (!shader)
1375 shader = iris_compile_tcs(ice, tcs, &key);
1376
1377 if (old != shader) {
1378 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1379 ice->state.dirty |= IRIS_DIRTY_TCS |
1380 IRIS_DIRTY_BINDINGS_TCS |
1381 IRIS_DIRTY_CONSTANTS_TCS;
1382 shs->sysvals_need_upload = true;
1383 }
1384 }
1385
1386 /**
1387 * Compile a tessellation evaluation shader, and upload the assembly.
1388 */
1389 static struct iris_compiled_shader *
1390 iris_compile_tes(struct iris_context *ice,
1391 struct iris_uncompiled_shader *ish,
1392 const struct iris_tes_prog_key *key)
1393 {
1394 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1395 const struct brw_compiler *compiler = screen->compiler;
1396 void *mem_ctx = ralloc_context(NULL);
1397 struct brw_tes_prog_data *tes_prog_data =
1398 rzalloc(mem_ctx, struct brw_tes_prog_data);
1399 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1400 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1401 enum brw_param_builtin *system_values;
1402 const struct gen_device_info *devinfo = &screen->devinfo;
1403 unsigned num_system_values;
1404 unsigned num_cbufs;
1405
1406 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1407
1408 if (key->vue.nr_userclip_plane_consts) {
1409 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1410 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1411 true, false, NULL);
1412 nir_lower_io_to_temporaries(nir, impl, true, false);
1413 nir_lower_global_vars_to_local(nir);
1414 nir_lower_vars_to_ssa(nir);
1415 nir_shader_gather_info(nir, impl);
1416 }
1417
1418 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1419 &num_system_values, &num_cbufs);
1420
1421 struct iris_binding_table bt;
1422 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1423 num_system_values, num_cbufs);
1424
1425 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1426
1427 struct brw_vue_map input_vue_map;
1428 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1429 key->patch_inputs_read);
1430
1431 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1432
1433 char *error_str = NULL;
1434 const unsigned *program =
1435 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1436 tes_prog_data, nir, -1, NULL, &error_str);
1437 if (program == NULL) {
1438 dbg_printf("Failed to compile tessellation evaluation shader: %s\n", error_str);
1439 ralloc_free(mem_ctx);
1440 return NULL;
1441 }
1442
1443 if (ish->compiled_once) {
1444 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1445 } else {
1446 ish->compiled_once = true;
1447 }
1448
1449 uint32_t *so_decls =
1450 ice->vtbl.create_so_decl_list(&ish->stream_output,
1451 &vue_prog_data->vue_map);
1452
1453
1454 struct iris_compiled_shader *shader =
1455 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1456 prog_data, so_decls, system_values, num_system_values,
1457 num_cbufs, &bt);
1458
1459 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1460
1461 ralloc_free(mem_ctx);
1462 return shader;
1463 }
1464
1465 /**
1466 * Update the current tessellation evaluation shader variant.
1467 *
1468 * Fill out the key, look in the cache, compile and bind if needed.
1469 */
1470 static void
1471 iris_update_compiled_tes(struct iris_context *ice)
1472 {
1473 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1474 struct iris_uncompiled_shader *ish =
1475 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1476
1477 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1478 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1479 ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1480
1481 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1482 struct iris_compiled_shader *shader =
1483 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1484
1485 if (!shader)
1486 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1487
1488 if (!shader)
1489 shader = iris_compile_tes(ice, ish, &key);
1490
1491 if (old != shader) {
1492 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1493 ice->state.dirty |= IRIS_DIRTY_TES |
1494 IRIS_DIRTY_BINDINGS_TES |
1495 IRIS_DIRTY_CONSTANTS_TES;
1496 shs->sysvals_need_upload = true;
1497 }
1498
1499 /* TODO: Could compare and avoid flagging this. */
1500 const struct shader_info *tes_info = &ish->nir->info;
1501 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1502 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
1503 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1504 }
1505 }
1506
1507 /**
1508 * Compile a geometry shader, and upload the assembly.
1509 */
1510 static struct iris_compiled_shader *
1511 iris_compile_gs(struct iris_context *ice,
1512 struct iris_uncompiled_shader *ish,
1513 const struct iris_gs_prog_key *key)
1514 {
1515 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1516 const struct brw_compiler *compiler = screen->compiler;
1517 const struct gen_device_info *devinfo = &screen->devinfo;
1518 void *mem_ctx = ralloc_context(NULL);
1519 struct brw_gs_prog_data *gs_prog_data =
1520 rzalloc(mem_ctx, struct brw_gs_prog_data);
1521 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1522 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1523 enum brw_param_builtin *system_values;
1524 unsigned num_system_values;
1525 unsigned num_cbufs;
1526
1527 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1528
1529 if (key->vue.nr_userclip_plane_consts) {
1530 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1531 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1532 false, NULL);
1533 nir_lower_io_to_temporaries(nir, impl, true, false);
1534 nir_lower_global_vars_to_local(nir);
1535 nir_lower_vars_to_ssa(nir);
1536 nir_shader_gather_info(nir, impl);
1537 }
1538
1539 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1540 &num_system_values, &num_cbufs);
1541
1542 struct iris_binding_table bt;
1543 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1544 num_system_values, num_cbufs);
1545
1546 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1547
1548 brw_compute_vue_map(devinfo,
1549 &vue_prog_data->vue_map, nir->info.outputs_written,
1550 nir->info.separate_shader);
1551
1552 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1553
1554 char *error_str = NULL;
1555 const unsigned *program =
1556 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1557 nir, NULL, -1, NULL, &error_str);
1558 if (program == NULL) {
1559 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1560 ralloc_free(mem_ctx);
1561 return NULL;
1562 }
1563
1564 if (ish->compiled_once) {
1565 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1566 } else {
1567 ish->compiled_once = true;
1568 }
1569
1570 uint32_t *so_decls =
1571 ice->vtbl.create_so_decl_list(&ish->stream_output,
1572 &vue_prog_data->vue_map);
1573
1574 struct iris_compiled_shader *shader =
1575 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1576 prog_data, so_decls, system_values, num_system_values,
1577 num_cbufs, &bt);
1578
1579 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1580
1581 ralloc_free(mem_ctx);
1582 return shader;
1583 }
1584
1585 /**
1586 * Update the current geometry shader variant.
1587 *
1588 * Fill out the key, look in the cache, compile and bind if needed.
1589 */
1590 static void
1591 iris_update_compiled_gs(struct iris_context *ice)
1592 {
1593 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1594 struct iris_uncompiled_shader *ish =
1595 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1596 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1597 struct iris_compiled_shader *shader = NULL;
1598
1599 if (ish) {
1600 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1601 ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1602
1603 shader =
1604 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1605
1606 if (!shader)
1607 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1608
1609 if (!shader)
1610 shader = iris_compile_gs(ice, ish, &key);
1611 }
1612
1613 if (old != shader) {
1614 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1615 ice->state.dirty |= IRIS_DIRTY_GS |
1616 IRIS_DIRTY_BINDINGS_GS |
1617 IRIS_DIRTY_CONSTANTS_GS;
1618 shs->sysvals_need_upload = true;
1619 }
1620 }
1621
1622 /**
1623 * Compile a fragment (pixel) shader, and upload the assembly.
1624 */
1625 static struct iris_compiled_shader *
1626 iris_compile_fs(struct iris_context *ice,
1627 struct iris_uncompiled_shader *ish,
1628 const struct iris_fs_prog_key *key,
1629 struct brw_vue_map *vue_map)
1630 {
1631 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1632 const struct brw_compiler *compiler = screen->compiler;
1633 void *mem_ctx = ralloc_context(NULL);
1634 struct brw_wm_prog_data *fs_prog_data =
1635 rzalloc(mem_ctx, struct brw_wm_prog_data);
1636 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1637 enum brw_param_builtin *system_values;
1638 const struct gen_device_info *devinfo = &screen->devinfo;
1639 unsigned num_system_values;
1640 unsigned num_cbufs;
1641
1642 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1643
1644 prog_data->use_alt_mode = ish->use_alt_mode;
1645
1646 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1647 &num_system_values, &num_cbufs);
1648
1649 /* Lower output variables to load_output intrinsics before setting up
1650 * binding tables, so iris_setup_binding_table can map any load_output
1651 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1652 * non-coherent framebuffer fetches.
1653 */
1654 brw_nir_lower_fs_outputs(nir);
1655
1656 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1657 * and do not need a binding table entry with a null surface. Earlier
1658 * generations need an entry for a null surface.
1659 */
1660 int null_rts = devinfo->gen < 11 ? 1 : 0;
1661
1662 struct iris_binding_table bt;
1663 iris_setup_binding_table(devinfo, nir, &bt,
1664 MAX2(key->nr_color_regions, null_rts),
1665 num_system_values, num_cbufs);
1666
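/* Let the backend pick out a few (up to four) constant-offset UBO ranges
 * that can be uploaded as push constants instead of being pulled at
 * runtime; the results land in prog_data->ubo_ranges.
 */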
1667 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1668
1669 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1670
1671 char *error_str = NULL;
1672 const unsigned *program =
1673 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1674 nir, -1, -1, -1, true, false, vue_map,
1675 NULL, &error_str);
1676 if (program == NULL) {
1677 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1678 ralloc_free(mem_ctx);
1679 return NULL;
1680 }
1681
1682 if (ish->compiled_once) {
1683 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1684 } else {
1685 ish->compiled_once = true;
1686 }
1687
1688 struct iris_compiled_shader *shader =
1689 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1690 prog_data, NULL, system_values, num_system_values,
1691 num_cbufs, &bt);
1692
1693 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1694
1695 ralloc_free(mem_ctx);
1696 return shader;
1697 }
1698
1699 /**
1700 * Update the current fragment shader variant.
1701 *
1702 * Fill out the key, look in the cache, compile and bind if needed.
1703 */
1704 static void
1705 iris_update_compiled_fs(struct iris_context *ice)
1706 {
1707 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1708 struct iris_uncompiled_shader *ish =
1709 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1710 struct iris_fs_prog_key key = { KEY_ID(base) };
1711 ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1712
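/* Fragment shaders with more than 16 varying inputs set
 * IRIS_NOS_LAST_VUE_MAP (see iris_create_fs_state below), so their key
 * must also capture the previous stage's output slot layout.
 */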
1713 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1714 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1715
1716 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1717 struct iris_compiled_shader *shader =
1718 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1719
1720 if (!shader)
1721 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1722
1723 if (!shader)
1724 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1725
1726 if (old != shader) {
1727 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1728 // toggles. might be able to avoid flagging SBE too.
1729 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1730 ice->state.dirty |= IRIS_DIRTY_FS |
1731 IRIS_DIRTY_BINDINGS_FS |
1732 IRIS_DIRTY_CONSTANTS_FS |
1733 IRIS_DIRTY_WM |
1734 IRIS_DIRTY_CLIP |
1735 IRIS_DIRTY_SBE;
1736 shs->sysvals_need_upload = true;
1737 }
1738 }
1739
1740 /**
1741 * Update the last enabled stage's VUE map.
1742 *
1743 * When the shader feeding the rasterizer's output interface changes, we
1744 * need to re-emit various packets.
1745 */
1746 static void
1747 update_last_vue_map(struct iris_context *ice,
1748 struct brw_stage_prog_data *prog_data)
1749 {
1750 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1751 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1752 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1753 const uint64_t changed_slots =
1754 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1755
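/* Whether the last VUE stage writes gl_ViewportIndex determines how many
 * viewports are in use (1 vs. IRIS_MAX_VIEWPORTS), which in turn affects
 * the viewport, scissor, and clip state flagged below.
 */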
1756 if (changed_slots & VARYING_BIT_VIEWPORT) {
1757 ice->state.num_viewports =
1758 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1759 ice->state.dirty |= IRIS_DIRTY_CLIP |
1760 IRIS_DIRTY_SF_CL_VIEWPORT |
1761 IRIS_DIRTY_CC_VIEWPORT |
1762 IRIS_DIRTY_SCISSOR_RECT |
1763 IRIS_DIRTY_UNCOMPILED_FS |
1764 ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1765 }
1766
1767 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1768 ice->state.dirty |= IRIS_DIRTY_SBE;
1769 }
1770
1771 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1772 }
1773
1774 static void
1775 iris_update_pull_constant_descriptors(struct iris_context *ice,
1776 gl_shader_stage stage)
1777 {
1778 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1779
1780 if (!shader || !shader->prog_data->has_ubo_pull)
1781 return;
1782
1783 struct iris_shader_state *shs = &ice->state.shaders[stage];
1784 bool any_new_descriptors =
1785 shader->num_system_values > 0 && shs->sysvals_need_upload;
1786
1787 unsigned bound_cbufs = shs->bound_cbufs;
1788
1789 while (bound_cbufs) {
1790 const int i = u_bit_scan(&bound_cbufs);
1791 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1792 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1793 if (!surf_state->res && cbuf->buffer) {
1794 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1795 any_new_descriptors = true;
1796 }
1797 }
1798
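/* The per-stage dirty bits are laid out consecutively in gl_shader_stage
 * order, so shifting the VS bit by the stage index yields this stage's
 * binding-table flag (the same trick is used throughout this file).
 */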
1799 if (any_new_descriptors)
1800 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1801 }
1802
1803 /**
1804 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1805 */
1806 static struct brw_vue_prog_data *
1807 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1808 {
1809 if (!ice->shaders.prog[stage])
1810 return NULL;
1811
1812 return (void *) ice->shaders.prog[stage]->prog_data;
1813 }
1814
1815 // XXX: iris_compiled_shaders are space-leaking :(
1816 // XXX: do remember to unbind them if deleting them.
1817
1818 /**
1819 * Update the current shader variants for the given state.
1820 *
1821 * This should be called on every draw call to ensure that the correct
1822 * shaders are bound. It will also flag any dirty state triggered by
1823 * swapping out those shaders.
1824 */
1825 void
1826 iris_update_compiled_shaders(struct iris_context *ice)
1827 {
1828 const uint64_t dirty = ice->state.dirty;
1829
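/* Snapshot the current VUE prog data (VS..GS) so we can detect URB entry
 * size changes after swapping shader variants below. If the URB is
 * already flagged dirty, there's no need.
 */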
1830 struct brw_vue_prog_data *old_prog_datas[4];
1831 if (!(dirty & IRIS_DIRTY_URB)) {
1832 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1833 old_prog_datas[i] = get_vue_prog_data(ice, i);
1834 }
1835
1836 if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
1837 struct iris_uncompiled_shader *tes =
1838 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1839 if (tes) {
1840 iris_update_compiled_tcs(ice);
1841 iris_update_compiled_tes(ice);
1842 } else {
1843 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1844 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1845 ice->state.dirty |=
1846 IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
1847 IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
1848 IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
1849 }
1850 }
1851
1852 if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
1853 iris_update_compiled_vs(ice);
1854 if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
1855 iris_update_compiled_gs(ice);
1856
1857 if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
1858 const struct iris_compiled_shader *gs =
1859 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1860 const struct iris_compiled_shader *tes =
1861 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1862
1863 bool points_or_lines = false;
1864
1865 if (gs) {
1866 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1867 points_or_lines =
1868 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1869 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1870 } else if (tes) {
1871 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1872 points_or_lines =
1873 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1874 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1875 }
1876
1877 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1878 /* Outbound to XY Clip enables */
1879 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1880 ice->state.dirty |= IRIS_DIRTY_CLIP;
1881 }
1882 }
1883
1884 gl_shader_stage last_stage = last_vue_stage(ice);
1885 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1886 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1887 update_last_vue_map(ice, shader->prog_data);
1888 if (ice->state.streamout != shader->streamout) {
1889 ice->state.streamout = shader->streamout;
1890 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1891 }
1892
1893 if (ice->state.streamout_active) {
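/* Gallium expresses stream output strides in dwords; convert them to
 * bytes before stashing them on the SO targets for later buffer setup.
 */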
1894 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1895 struct iris_stream_output_target *so =
1896 (void *) ice->state.so_target[i];
1897 if (so)
1898 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1899 }
1900 }
1901
1902 if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
1903 iris_update_compiled_fs(ice);
1904
1905 /* Changing shader interfaces may require a URB reconfiguration. */
1906 if (!(dirty & IRIS_DIRTY_URB)) {
1907 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1908 struct brw_vue_prog_data *old = old_prog_datas[i];
1909 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1910 if (!!old != !!new ||
1911 (new && new->urb_entry_size != old->urb_entry_size)) {
1912 ice->state.dirty |= IRIS_DIRTY_URB;
1913 break;
1914 }
1915 }
1916 }
1917
1918 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1919 if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
1920 iris_update_pull_constant_descriptors(ice, i);
1921 }
1922 }
1923
1924 static struct iris_compiled_shader *
1925 iris_compile_cs(struct iris_context *ice,
1926 struct iris_uncompiled_shader *ish,
1927 const struct iris_cs_prog_key *key)
1928 {
1929 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1930 const struct brw_compiler *compiler = screen->compiler;
1931 void *mem_ctx = ralloc_context(NULL);
1932 struct brw_cs_prog_data *cs_prog_data =
1933 rzalloc(mem_ctx, struct brw_cs_prog_data);
1934 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1935 enum brw_param_builtin *system_values;
1936 const struct gen_device_info *devinfo = &screen->devinfo;
1937 unsigned num_system_values;
1938 unsigned num_cbufs;
1939
1940 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1941
1942 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1943 &num_system_values, &num_cbufs);
1944
1945 struct iris_binding_table bt;
1946 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1947 num_system_values, num_cbufs);
1948
1949 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1950
1951 char *error_str = NULL;
1952 const unsigned *program =
1953 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1954 nir, -1, NULL, &error_str);
1955 if (program == NULL) {
1956 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1957 ralloc_free(mem_ctx);
1958 return NULL;
1959 }
1960
1961 if (ish->compiled_once) {
1962 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1963 } else {
1964 ish->compiled_once = true;
1965 }
1966
1967 struct iris_compiled_shader *shader =
1968 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1969 prog_data, NULL, system_values, num_system_values,
1970 num_cbufs, &bt);
1971
1972 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1973
1974 ralloc_free(mem_ctx);
1975 return shader;
1976 }
1977
1978 static void
1979 iris_update_compiled_cs(struct iris_context *ice)
1980 {
1981 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
1982 struct iris_uncompiled_shader *ish =
1983 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
1984
1985 struct iris_cs_prog_key key = { KEY_ID(base) };
1986 ice->vtbl.populate_cs_key(ice, &key);
1987
1988 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
1989 struct iris_compiled_shader *shader =
1990 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
1991
1992 if (!shader)
1993 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1994
1995 if (!shader)
1996 shader = iris_compile_cs(ice, ish, &key);
1997
1998 if (old != shader) {
1999 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2000 ice->state.dirty |= IRIS_DIRTY_CS |
2001 IRIS_DIRTY_BINDINGS_CS |
2002 IRIS_DIRTY_CONSTANTS_CS;
2003 shs->sysvals_need_upload = true;
2004 }
2005 }
2006
2007 void
2008 iris_update_compiled_compute_shader(struct iris_context *ice)
2009 {
2010 if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
2011 iris_update_compiled_cs(ice);
2012
2013 if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
2014 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2015 }
2016
2017 void
2018 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2019 uint32_t *dst)
2020 {
2021 assert(cs_prog_data->push.total.size > 0);
2022 assert(cs_prog_data->push.cross_thread.size == 0);
2023 assert(cs_prog_data->push.per_thread.dwords == 1);
2024 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
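/* Each thread gets one register (8 dwords) of per-thread push data whose
 * first dword is its subgroup ID; the remaining dwords are padding. For
 * example, with 3 threads the buffer looks like:
 *
 *    dst[0]  = 0    dst[1..7]  = <padding>
 *    dst[8]  = 1    dst[9..15] = <padding>
 *    dst[16] = 2    ...
 */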
2025 for (unsigned t = 0; t < cs_prog_data->threads; t++)
2026 dst[8 * t] = t;
2027 }
2028
2029 /**
2030 * Allocate scratch BOs as needed for the given per-thread size and stage.
2031 */
2032 struct iris_bo *
2033 iris_get_scratch_space(struct iris_context *ice,
2034 unsigned per_thread_scratch,
2035 gl_shader_stage stage)
2036 {
2037 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2038 struct iris_bufmgr *bufmgr = screen->bufmgr;
2039 const struct gen_device_info *devinfo = &screen->devinfo;
2040
2041 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2042 assert(encoded_size < (1 << 16));
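/* per_thread_scratch is expected to be a power of two of at least 1KB,
 * so ffs() returns log2(size) + 1 and the subtraction maps 1KB -> 0,
 * 2KB -> 1, 4KB -> 2, and so on. For example, ffs(2048) == 12, giving an
 * encoded_size of 1. This index selects a bucket in
 * ice->shaders.scratch_bos below.
 */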
2043
2044 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2045
2046 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2047 *
2048 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2049 * must allocate scratch space enough so that each slice has 4
2050 * slices allowed."
2051 *
2052 * According to the other driver team, this applies to compute shaders
2053 * as well. This is not currently documented at all.
2054 *
2055 * This hack is no longer necessary on Gen11+.
2056 *
2057 * For ICL, scratch space allocation is based on the number of threads
2058 * in the base configuration.
2059 */
2060 unsigned subslice_total = screen->subslice_total;
2061 if (devinfo->gen == 11)
2062 subslice_total = 8;
2063 else if (devinfo->gen < 11)
2064 subslice_total = 4 * devinfo->num_slices;
2065 assert(subslice_total >= screen->subslice_total);
2066
2067 if (!*bop) {
2068 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2069
2070 if (devinfo->gen >= 11) {
2071 /* The MEDIA_VFE_STATE docs say:
2072 *
2073 * "Starting with this configuration, the Maximum Number of
2074 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2075 *
2076 * Although there are only 7 threads per EU in the configuration,
2077 * the FFTID is calculated as if there are 8 threads per EU,
2078 * which in turn requires a larger amount of Scratch Space to be
2079 * allocated by the driver."
2080 */
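/* Assuming the Gen11 base configuration of 8 EUs per subslice, this is
 * 8 EUs * 8 threads per EU, per the note above.
 */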
2081 scratch_ids_per_subslice = 8 * 8;
2082 }
2083
2084 uint32_t max_threads[] = {
2085 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2086 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2087 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2088 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2089 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2090 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2091 };
2092
2093 uint32_t size = per_thread_scratch * max_threads[stage];
2094
2095 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2096 }
2097
2098 return *bop;
2099 }
2100
2101 /* ------------------------------------------------------------------- */
2102
2103 /**
2104 * The pipe->create_[stage]_state() driver hooks.
2105 *
2106 * Performs basic NIR preprocessing, records any state dependencies, and
2107 * returns an iris_uncompiled_shader as the Gallium CSO.
2108 *
2109 * Actual shader compilation to assembly happens later, at first use.
2110 */
2111 static void *
2112 iris_create_uncompiled_shader(struct pipe_context *ctx,
2113 nir_shader *nir,
2114 const struct pipe_stream_output_info *so_info)
2115 {
2116 struct iris_context *ice = (void *)ctx;
2117 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2118 const struct gen_device_info *devinfo = &screen->devinfo;
2119
2120 struct iris_uncompiled_shader *ish =
2121 calloc(1, sizeof(struct iris_uncompiled_shader));
2122 if (!ish)
2123 return NULL;
2124
2125 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2126
2127 brw_preprocess_nir(screen->compiler, nir, NULL);
2128
2129 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
2130 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2131
2132 nir_sweep(nir);
2133
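/* If the NIR ended up with out-of-band constant data (e.g., large
 * constant arrays pulled out of the shader during preprocessing), upload
 * it once here and wrap it in a UBO-style surface that all variants of
 * this shader can share.
 */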
2134 if (nir->constant_data_size > 0) {
2135 unsigned data_offset;
2136 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2137 32, nir->constant_data, &data_offset, &ish->const_data);
2138
2139 struct pipe_shader_buffer psb = {
2140 .buffer = ish->const_data,
2141 .buffer_offset = data_offset,
2142 .buffer_size = nir->constant_data_size,
2143 };
2144 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2145 }
2146
2147 ish->program_id = get_new_program_id(screen);
2148 ish->nir = nir;
2149 if (so_info) {
2150 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2151 update_so_info(&ish->stream_output, nir->info.outputs_written);
2152 }
2153
2154 /* Save this now before potentially dropping nir->info.name */
2155 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2156 ish->use_alt_mode = true;
2157
2158 if (screen->disk_cache) {
2159 /* Serialize the NIR to a binary blob that we can hash for the disk
2160 * cache. Drop unnecessary information (like variable names)
2161 * so the serialized NIR is smaller, and also to let us detect more
2162 * isomorphic shaders when hashing, increasing cache hits.
2163 */
2164 struct blob blob;
2165 blob_init(&blob);
2166 nir_serialize(&blob, nir, true);
2167 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2168 blob_finish(&blob);
2169 }
2170
2171 return ish;
2172 }
2173
2174 static struct iris_uncompiled_shader *
2175 iris_create_shader_state(struct pipe_context *ctx,
2176 const struct pipe_shader_state *state)
2177 {
2178 struct nir_shader *nir;
2179
2180 if (state->type == PIPE_SHADER_IR_TGSI)
2181 nir = tgsi_to_nir(state->tokens, ctx->screen);
2182 else
2183 nir = state->ir.nir;
2184
2185 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2186 }
2187
2188 static void *
2189 iris_create_vs_state(struct pipe_context *ctx,
2190 const struct pipe_shader_state *state)
2191 {
2192 struct iris_context *ice = (void *) ctx;
2193 struct iris_screen *screen = (void *) ctx->screen;
2194 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2195
2196 /* User clip planes */
2197 if (ish->nir->info.clip_distance_array_size == 0)
2198 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2199
2200 if (screen->precompile) {
2201 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2202
2203 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2204 iris_compile_vs(ice, ish, &key);
2205 }
2206
2207 return ish;
2208 }
2209
2210 static void *
2211 iris_create_tcs_state(struct pipe_context *ctx,
2212 const struct pipe_shader_state *state)
2213 {
2214 struct iris_context *ice = (void *) ctx;
2215 struct iris_screen *screen = (void *) ctx->screen;
2216 const struct brw_compiler *compiler = screen->compiler;
2217 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2218 struct shader_info *info = &ish->nir->info;
2219
2220 if (screen->precompile) {
2221 const unsigned _GL_TRIANGLES = 0x0004;
2222 struct iris_tcs_prog_key key = {
2223 KEY_ID(vue.base),
2224 // XXX: make sure the linker fills this out from the TES...
2225 .tes_primitive_mode =
2226 info->tess.primitive_mode ? info->tess.primitive_mode
2227 : _GL_TRIANGLES,
2228 .outputs_written = info->outputs_written,
2229 .patch_outputs_written = info->patch_outputs_written,
2230 };
2231
2232 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2233 * We don't have that information, so we randomly guess that the input
2234 * and output patches are the same size. This is a bad guess, but we
2235 * can't do much better.
2236 */
2237 if (compiler->use_tcs_8_patch)
2238 key.input_vertices = info->tess.tcs_vertices_out;
2239
2240 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2241 iris_compile_tcs(ice, ish, &key);
2242 }
2243
2244 return ish;
2245 }
2246
2247 static void *
2248 iris_create_tes_state(struct pipe_context *ctx,
2249 const struct pipe_shader_state *state)
2250 {
2251 struct iris_context *ice = (void *) ctx;
2252 struct iris_screen *screen = (void *) ctx->screen;
2253 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2254 struct shader_info *info = &ish->nir->info;
2255
2256 /* User clip planes */
2257 if (ish->nir->info.clip_distance_array_size == 0)
2258 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2259
2260 if (screen->precompile) {
2261 struct iris_tes_prog_key key = {
2262 KEY_ID(vue.base),
2263 // XXX: not ideal, need TCS output/TES input unification
2264 .inputs_read = info->inputs_read,
2265 .patch_inputs_read = info->patch_inputs_read,
2266 };
2267
2268 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2269 iris_compile_tes(ice, ish, &key);
2270 }
2271
2272 return ish;
2273 }
2274
2275 static void *
2276 iris_create_gs_state(struct pipe_context *ctx,
2277 const struct pipe_shader_state *state)
2278 {
2279 struct iris_context *ice = (void *) ctx;
2280 struct iris_screen *screen = (void *) ctx->screen;
2281 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2282
2283 /* User clip planes */
2284 if (ish->nir->info.clip_distance_array_size == 0)
2285 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2286
2287 if (screen->precompile) {
2288 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2289
2290 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2291 iris_compile_gs(ice, ish, &key);
2292 }
2293
2294 return ish;
2295 }
2296
2297 static void *
2298 iris_create_fs_state(struct pipe_context *ctx,
2299 const struct pipe_shader_state *state)
2300 {
2301 struct iris_context *ice = (void *) ctx;
2302 struct iris_screen *screen = (void *) ctx->screen;
2303 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2304 struct shader_info *info = &ish->nir->info;
2305
2306 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2307 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2308 (1ull << IRIS_NOS_RASTERIZER) |
2309 (1ull << IRIS_NOS_BLEND);
2310
2311 /* The program key needs the VUE map if there are > 16 inputs */
2312 if (util_bitcount64(ish->nir->info.inputs_read &
2313 BRW_FS_VARYING_INPUT_MASK) > 16) {
2314 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2315 }
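/* Roughly: with 16 or fewer inputs, the hardware setup stage can remap
 * varyings to whatever layout the fragment shader prefers, so the compile
 * doesn't need to know the previous stage's outputs; past that limit the
 * shader has to be compiled against the actual VUE slot layout.
 */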
2316
2317 if (screen->precompile) {
2318 const uint64_t color_outputs = info->outputs_written &
2319 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2320 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2321 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2322
2323 bool can_rearrange_varyings =
2324 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2325
2326 const struct gen_device_info *devinfo = &screen->devinfo;
2327 struct iris_fs_prog_key key = {
2328 KEY_ID(base),
2329 .nr_color_regions = util_bitcount(color_outputs),
2330 .coherent_fb_fetch = devinfo->gen >= 9,
2331 .input_slots_valid =
2332 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2333 };
2334
2335 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2336 iris_compile_fs(ice, ish, &key, NULL);
2337 }
2338
2339 return ish;
2340 }
2341
2342 static void *
2343 iris_create_compute_state(struct pipe_context *ctx,
2344 const struct pipe_compute_state *state)
2345 {
2346 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2347
2348 struct iris_context *ice = (void *) ctx;
2349 struct iris_screen *screen = (void *) ctx->screen;
2350 struct iris_uncompiled_shader *ish =
2351 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2352
2353 // XXX: disallow more than 64KB of shared variables
2354
2355 if (screen->precompile) {
2356 struct iris_cs_prog_key key = { KEY_ID(base) };
2357
2358 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2359 iris_compile_cs(ice, ish, &key);
2360 }
2361
2362 return ish;
2363 }
2364
2365 /**
2366 * The pipe->delete_[stage]_state() driver hooks.
2367 *
2368 * Frees the iris_uncompiled_shader.
2369 */
2370 static void
2371 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2372 {
2373 struct iris_uncompiled_shader *ish = state;
2374 struct iris_context *ice = (void *) ctx;
2375
2376 if (ice->shaders.uncompiled[stage] == ish) {
2377 ice->shaders.uncompiled[stage] = NULL;
2378 ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
2379 }
2380
2381 if (ish->const_data) {
2382 pipe_resource_reference(&ish->const_data, NULL);
2383 pipe_resource_reference(&ish->const_data_state.res, NULL);
2384 }
2385
2386 ralloc_free(ish->nir);
2387 free(ish);
2388 }
2389
2390 static void
2391 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2392 {
2393 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2394 }
2395
2396 static void
2397 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2398 {
2399 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2400 }
2401
2402 static void
2403 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2404 {
2405 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2406 }
2407
2408 static void
2409 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2410 {
2411 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2412 }
2413
2414 static void
2415 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2416 {
2417 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2418 }
2419
2420 static void
2421 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2422 {
2423 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2424 }
2425
2426 /**
2427 * The pipe->bind_[stage]_state() driver hook.
2428 *
2429 * Binds an uncompiled shader as the current one for a particular stage.
2430 * Updates dirty tracking to account for the shader's NOS.
2431 */
2432 static void
2433 bind_shader_state(struct iris_context *ice,
2434 struct iris_uncompiled_shader *ish,
2435 gl_shader_stage stage)
2436 {
2437 uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
2438 const uint64_t nos = ish ? ish->nos : 0;
2439
2440 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2441 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2442
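/* util_last_bit(textures_used) is the number of texture/sampler slots the
 * stage needs; if that count changes, the stage's SAMPLER_STATE table has
 * a different size and must be re-uploaded.
 */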
2443 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2444 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2445 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
2446 }
2447
2448 ice->shaders.uncompiled[stage] = ish;
2449 ice->state.dirty |= dirty_bit;
2450
2451 /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
2452 * (or that they no longer need to do so).
2453 */
2454 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2455 if (nos & (1 << i))
2456 ice->state.dirty_for_nos[i] |= dirty_bit;
2457 else
2458 ice->state.dirty_for_nos[i] &= ~dirty_bit;
2459 }
2460 }
2461
2462 static void
2463 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2464 {
2465 struct iris_context *ice = (struct iris_context *)ctx;
2466 struct iris_uncompiled_shader *new_ish = state;
2467
2468 if (new_ish &&
2469 ice->state.window_space_position !=
2470 new_ish->nir->info.vs.window_space_position) {
2471 ice->state.window_space_position =
2472 new_ish->nir->info.vs.window_space_position;
2473
2474 ice->state.dirty |= IRIS_DIRTY_CLIP |
2475 IRIS_DIRTY_RASTER |
2476 IRIS_DIRTY_CC_VIEWPORT;
2477 }
2478
2479 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2480 }
2481
2482 static void
2483 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2484 {
2485 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2486 }
2487
2488 static void
2489 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2490 {
2491 struct iris_context *ice = (struct iris_context *)ctx;
2492
2493 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2494 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2495 ice->state.dirty |= IRIS_DIRTY_URB;
2496
2497 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2498 }
2499
2500 static void
2501 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2502 {
2503 struct iris_context *ice = (struct iris_context *)ctx;
2504
2505 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2506 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2507 ice->state.dirty |= IRIS_DIRTY_URB;
2508
2509 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2510 }
2511
2512 static void
2513 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2514 {
2515 struct iris_context *ice = (struct iris_context *) ctx;
2516 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2517 const struct gen_device_info *devinfo = &screen->devinfo;
2518 struct iris_uncompiled_shader *old_ish =
2519 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2520 struct iris_uncompiled_shader *new_ish = state;
2521
2522 const unsigned color_bits =
2523 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2524 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2525
2526 /* Fragment shader outputs influence HasWriteableRT */
2527 if (!old_ish || !new_ish ||
2528 (old_ish->nir->info.outputs_written & color_bits) !=
2529 (new_ish->nir->info.outputs_written & color_bits))
2530 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2531
2532 if (devinfo->gen == 8)
2533 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2534
2535 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2536 }
2537
2538 static void
2539 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2540 {
2541 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2542 }
2543
2544 void
2545 iris_init_program_functions(struct pipe_context *ctx)
2546 {
2547 ctx->create_vs_state = iris_create_vs_state;
2548 ctx->create_tcs_state = iris_create_tcs_state;
2549 ctx->create_tes_state = iris_create_tes_state;
2550 ctx->create_gs_state = iris_create_gs_state;
2551 ctx->create_fs_state = iris_create_fs_state;
2552 ctx->create_compute_state = iris_create_compute_state;
2553
2554 ctx->delete_vs_state = iris_delete_vs_state;
2555 ctx->delete_tcs_state = iris_delete_tcs_state;
2556 ctx->delete_tes_state = iris_delete_tes_state;
2557 ctx->delete_gs_state = iris_delete_gs_state;
2558 ctx->delete_fs_state = iris_delete_fs_state;
2559 ctx->delete_compute_state = iris_delete_cs_state;
2560
2561 ctx->bind_vs_state = iris_bind_vs_state;
2562 ctx->bind_tcs_state = iris_bind_tcs_state;
2563 ctx->bind_tes_state = iris_bind_tes_state;
2564 ctx->bind_gs_state = iris_bind_gs_state;
2565 ctx->bind_fs_state = iris_bind_fs_state;
2566 ctx->bind_compute_state = iris_bind_cs_state;
2567 }