1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
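/* A note on the 0x688 constant above: with three bits per swizzle
 * component, the identity swizzle SWIZZLE_XYZW packs as
 * (0 << 0) | (1 << 3) | (2 << 6) | (3 << 9) == 0x688, so (assuming that
 * encoding) every sampler defaults to a pass-through swizzle unless a
 * shader key overrides it.
 */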
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants, we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 };
129 }
130
131 static struct brw_cs_prog_key
132 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
133 const struct iris_cs_prog_key *key)
134 {
135 return (struct brw_cs_prog_key) {
136 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
137 };
138 }
139
140 static void *
141 upload_state(struct u_upload_mgr *uploader,
142 struct iris_state_ref *ref,
143 unsigned size,
144 unsigned alignment)
145 {
146 void *p = NULL;
147 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
148 return p;
149 }
150
151 void
152 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
153 struct pipe_shader_buffer *buf,
154 struct iris_state_ref *surf_state,
155 bool ssbo)
156 {
157 struct pipe_context *ctx = &ice->ctx;
158 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
159
160 void *map =
161 upload_state(ice->state.surface_uploader, surf_state,
162 screen->isl_dev.ss.size, 64);
163    if (unlikely(!map)) {
164 surf_state->res = NULL;
165 return;
166 }
167
168 struct iris_resource *res = (void *) buf->buffer;
169 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
170 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
171
172 isl_buffer_fill_state(&screen->isl_dev, map,
173 .address = res->bo->gtt_offset + res->offset +
174 buf->buffer_offset,
175 .size_B = buf->buffer_size - res->offset,
176 .format = ssbo ? ISL_FORMAT_RAW
177 : ISL_FORMAT_R32G32B32A32_FLOAT,
178 .swizzle = ISL_SWIZZLE_IDENTITY,
179 .stride_B = 1,
180 .mocs = ice->vtbl.mocs(res->bo, &screen->isl_dev));
181 }
182
183 static nir_ssa_def *
184 get_aoa_deref_offset(nir_builder *b,
185 nir_deref_instr *deref,
186 unsigned elem_size)
187 {
188 unsigned array_size = elem_size;
189 nir_ssa_def *offset = nir_imm_int(b, 0);
190
191 while (deref->deref_type != nir_deref_type_var) {
192 assert(deref->deref_type == nir_deref_type_array);
193
194 /* This level's element size is the previous level's array size */
195 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
196 assert(deref->arr.index.ssa);
197 offset = nir_iadd(b, offset,
198 nir_imul(b, index, nir_imm_int(b, array_size)));
199
200 deref = nir_deref_instr_parent(deref);
201 assert(glsl_type_is_array(deref->type));
202 array_size *= glsl_get_length(deref->type);
203 }
204
205 /* Accessing an invalid surface index with the dataport can result in a
206 * hang. According to the spec "if the index used to select an individual
207 * element is negative or greater than or equal to the size of the array,
208 * the results of the operation are undefined but may not lead to
209 * termination" -- which is one of the possible outcomes of the hang.
210 * Clamp the index to prevent access outside of the array bounds.
211 */
212 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
213 }
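/* Worked example (illustrative, not part of the original code): for
 * "uniform image2D img[3][2]" and an access to img[i][j] with
 * elem_size == 1, the loop accumulates offset = j * 1 + i * 2 and ends
 * with array_size == 6, so the function returns umin(i * 2 + j, 5),
 * i.e. the flattened element index clamped to the last valid entry.
 */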
214
215 static void
216 iris_lower_storage_image_derefs(nir_shader *nir)
217 {
218 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
219
220 nir_builder b;
221 nir_builder_init(&b, impl);
222
223 nir_foreach_block(block, impl) {
224 nir_foreach_instr_safe(instr, block) {
225 if (instr->type != nir_instr_type_intrinsic)
226 continue;
227
228 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
229 switch (intrin->intrinsic) {
230 case nir_intrinsic_image_deref_load:
231 case nir_intrinsic_image_deref_store:
232 case nir_intrinsic_image_deref_atomic_add:
233 case nir_intrinsic_image_deref_atomic_imin:
234 case nir_intrinsic_image_deref_atomic_umin:
235 case nir_intrinsic_image_deref_atomic_imax:
236 case nir_intrinsic_image_deref_atomic_umax:
237 case nir_intrinsic_image_deref_atomic_and:
238 case nir_intrinsic_image_deref_atomic_or:
239 case nir_intrinsic_image_deref_atomic_xor:
240 case nir_intrinsic_image_deref_atomic_exchange:
241 case nir_intrinsic_image_deref_atomic_comp_swap:
242 case nir_intrinsic_image_deref_size:
243 case nir_intrinsic_image_deref_samples:
244 case nir_intrinsic_image_deref_load_raw_intel:
245 case nir_intrinsic_image_deref_store_raw_intel: {
246 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
247 nir_variable *var = nir_deref_instr_get_variable(deref);
248
249 b.cursor = nir_before_instr(&intrin->instr);
250 nir_ssa_def *index =
251 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
252 get_aoa_deref_offset(&b, deref, 1));
253 nir_rewrite_image_intrinsic(intrin, index, false);
254 break;
255 }
256
257 default:
258 break;
259 }
260 }
261 }
262 }
263
264 // XXX: need unify_interfaces() at link time...
265
266 /**
267 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
268 */
269 static bool
270 iris_fix_edge_flags(nir_shader *nir)
271 {
272 if (nir->info.stage != MESA_SHADER_VERTEX)
273 return false;
274
275 nir_variable *var = NULL;
276 nir_foreach_variable(v, &nir->outputs) {
277 if (v->data.location == VARYING_SLOT_EDGE) {
278 var = v;
279 break;
280 }
281 }
282
283 if (!var)
284 return false;
285
286 exec_node_remove(&var->node);
287 var->data.mode = nir_var_shader_temp;
288 exec_list_push_tail(&nir->globals, &var->node);
289 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
290 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
291 nir_fixup_deref_modes(nir);
292
293 nir_foreach_function(f, nir) {
294 if (f->impl) {
295 nir_metadata_preserve(f->impl, nir_metadata_block_index |
296 nir_metadata_dominance |
297 nir_metadata_live_ssa_defs |
298 nir_metadata_loop_analysis);
299 }
300 }
301
302 return true;
303 }
304
305 /**
306 * Fix an uncompiled shader's stream output info.
307 *
308 * Core Gallium stores output->register_index as a "slot" number, where
309 * slots are assigned consecutively to all outputs in info->outputs_written.
310 * This naive packing of outputs doesn't work for us - we too have slots,
311 * but the layout is defined by the VUE map, which we won't have until we
312 * compile a specific shader variant. So, we remap these and simply store
313 * VARYING_SLOT_* in our copy's output->register_index fields.
314 *
315 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
316 * components of our VUE header. See brw_vue_map.c for the layout.
317 */
318 static void
319 update_so_info(struct pipe_stream_output_info *so_info,
320 uint64_t outputs_written)
321 {
322 uint8_t reverse_map[64] = {};
323 unsigned slot = 0;
324 while (outputs_written) {
325 reverse_map[slot++] = u_bit_scan64(&outputs_written);
326 }
327
328 for (unsigned i = 0; i < so_info->num_outputs; i++) {
329 struct pipe_stream_output *output = &so_info->output[i];
330
331 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
332 output->register_index = reverse_map[output->register_index];
333
334 /* The VUE header contains three scalar fields packed together:
335 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
336 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
337 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
338 */
339 switch (output->register_index) {
340 case VARYING_SLOT_LAYER:
341 assert(output->num_components == 1);
342 output->register_index = VARYING_SLOT_PSIZ;
343 output->start_component = 1;
344 break;
345 case VARYING_SLOT_VIEWPORT:
346 assert(output->num_components == 1);
347 output->register_index = VARYING_SLOT_PSIZ;
348 output->start_component = 2;
349 break;
350 case VARYING_SLOT_PSIZ:
351 assert(output->num_components == 1);
352 output->start_component = 3;
353 break;
354 }
355
356 //info->outputs_written |= 1ull << output->register_index;
357 }
358 }
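/* Example of the remapping above (illustrative): a transform feedback
 * output capturing gl_Layer arrives from Gallium with a condensed slot
 * number; after this function it holds register_index = VARYING_SLOT_PSIZ
 * with start_component = 1, matching the Y channel of the VUE header
 * described in the comment above.
 */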
359
360 static void
361 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
362 unsigned offset, unsigned n)
363 {
364 assert(offset % sizeof(uint32_t) == 0);
365
366 for (unsigned i = 0; i < n; ++i)
367 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
368
369 for (unsigned i = n; i < 4; ++i)
370 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
371 }
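/* For instance (illustrative): setup_vec4_image_sysval(sv, img,
 * offsetof(struct brw_image_param, offset), 2) writes
 * BRW_PARAM_IMAGE(img, ...) entries for the two offset dwords into
 * sv[0..1] and pads sv[2..3] with BRW_PARAM_BUILTIN_ZERO, so each image
 * field occupies a full vec4 worth of params.
 */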
372
373 /**
374 * Associate NIR uniform variables with the prog_data->param[] mechanism
375 * used by the backend. Also, decide which UBOs we'd like to push in an
376 * ideal situation (though the backend can reduce this).
377 */
378 static void
379 iris_setup_uniforms(const struct brw_compiler *compiler,
380 void *mem_ctx,
381 nir_shader *nir,
382 struct brw_stage_prog_data *prog_data,
383 enum brw_param_builtin **out_system_values,
384 unsigned *out_num_system_values,
385 unsigned *out_num_cbufs)
386 {
387 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
388
389 const unsigned IRIS_MAX_SYSTEM_VALUES =
390 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
391 enum brw_param_builtin *system_values =
392 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
393 unsigned num_system_values = 0;
394
395 unsigned patch_vert_idx = -1;
396 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
397 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
398 memset(ucp_idx, -1, sizeof(ucp_idx));
399 memset(img_idx, -1, sizeof(img_idx));
400
401 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
402
403 nir_builder b;
404 nir_builder_init(&b, impl);
405
406 b.cursor = nir_before_block(nir_start_block(impl));
407 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
408 nir_ssa_def *temp_const_ubo_name = NULL;
409
410 /* Turn system value intrinsics into uniforms */
411 nir_foreach_block(block, impl) {
412 nir_foreach_instr_safe(instr, block) {
413 if (instr->type != nir_instr_type_intrinsic)
414 continue;
415
416 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
417 nir_ssa_def *offset;
418
419 switch (intrin->intrinsic) {
420 case nir_intrinsic_load_constant: {
421 /* This one is special because it reads from the shader constant
422 * data and not cbuf0 which gallium uploads for us.
423 */
424 b.cursor = nir_before_instr(instr);
425 nir_ssa_def *offset =
426 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
427 nir_intrinsic_base(intrin));
428
429 if (temp_const_ubo_name == NULL)
430 temp_const_ubo_name = nir_imm_int(&b, 0);
431
432 nir_intrinsic_instr *load_ubo =
433 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
434 load_ubo->num_components = intrin->num_components;
435 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
436 load_ubo->src[1] = nir_src_for_ssa(offset);
437 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
438 intrin->dest.ssa.num_components,
439 intrin->dest.ssa.bit_size,
440 intrin->dest.ssa.name);
441 nir_builder_instr_insert(&b, &load_ubo->instr);
442
443 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
444 nir_src_for_ssa(&load_ubo->dest.ssa));
445 nir_instr_remove(&intrin->instr);
446 continue;
447 }
448 case nir_intrinsic_load_user_clip_plane: {
449 unsigned ucp = nir_intrinsic_ucp_id(intrin);
450
451 if (ucp_idx[ucp] == -1) {
452 ucp_idx[ucp] = num_system_values;
453 num_system_values += 4;
454 }
455
456 for (int i = 0; i < 4; i++) {
457 system_values[ucp_idx[ucp] + i] =
458 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
459 }
460
461 b.cursor = nir_before_instr(instr);
462 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
463 break;
464 }
465 case nir_intrinsic_load_patch_vertices_in:
466 if (patch_vert_idx == -1)
467 patch_vert_idx = num_system_values++;
468
469 system_values[patch_vert_idx] =
470 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
471
472 b.cursor = nir_before_instr(instr);
473 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
474 break;
475 case nir_intrinsic_image_deref_load_param_intel: {
476 assert(devinfo->gen < 9);
477 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
478 nir_variable *var = nir_deref_instr_get_variable(deref);
479
480 if (img_idx[var->data.binding] == -1) {
481 /* GL only allows arrays of arrays of images. */
482 assert(glsl_type_is_image(glsl_without_array(var->type)));
483 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
484
485 for (int i = 0; i < num_images; i++) {
486 const unsigned img = var->data.binding + i;
487
488 img_idx[img] = num_system_values;
489 num_system_values += BRW_IMAGE_PARAM_SIZE;
490
491 uint32_t *img_sv = &system_values[img_idx[img]];
492
493 setup_vec4_image_sysval(
494 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
495 offsetof(struct brw_image_param, offset), 2);
496 setup_vec4_image_sysval(
497 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
498 offsetof(struct brw_image_param, size), 3);
499 setup_vec4_image_sysval(
500 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
501 offsetof(struct brw_image_param, stride), 4);
502 setup_vec4_image_sysval(
503 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
504 offsetof(struct brw_image_param, tiling), 3);
505 setup_vec4_image_sysval(
506 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
507 offsetof(struct brw_image_param, swizzling), 2);
508 }
509 }
510
511 b.cursor = nir_before_instr(instr);
512 offset = nir_iadd(&b,
513 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
514 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
515 nir_intrinsic_base(intrin) * 16));
516 break;
517 }
518 default:
519 continue;
520 }
521
522 unsigned comps = nir_intrinsic_dest_components(intrin);
523
524 nir_intrinsic_instr *load =
525 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
526 load->num_components = comps;
527 load->src[0] = nir_src_for_ssa(temp_ubo_name);
528 load->src[1] = nir_src_for_ssa(offset);
529 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
530 nir_builder_instr_insert(&b, &load->instr);
531 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
532 nir_src_for_ssa(&load->dest.ssa));
533 nir_instr_remove(instr);
534 }
535 }
536
537 nir_validate_shader(nir, "before remapping");
538
539    /* Uniforms are stored in constant buffer 0, so the
540     * user-facing UBOs are all shifted up by one.  If any constant buffer is
541     * needed, constant buffer 0 will be needed, so account for it.
542 */
543 unsigned num_cbufs = nir->info.num_ubos;
544 if (num_cbufs || nir->num_uniforms)
545 num_cbufs++;
546
547 /* Place the new params in a new cbuf. */
548 if (num_system_values > 0) {
549 unsigned sysval_cbuf_index = num_cbufs;
550 num_cbufs++;
551
552 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
553 num_system_values);
554
555 nir_foreach_block(block, impl) {
556 nir_foreach_instr_safe(instr, block) {
557 if (instr->type != nir_instr_type_intrinsic)
558 continue;
559
560 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
561
562 if (load->intrinsic != nir_intrinsic_load_ubo)
563 continue;
564
565 b.cursor = nir_before_instr(instr);
566
567 assert(load->src[0].is_ssa);
568
569 if (load->src[0].ssa == temp_ubo_name) {
570 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
571 nir_instr_rewrite_src(instr, &load->src[0],
572 nir_src_for_ssa(imm));
573 }
574 }
575 }
576
577 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
578 nir_opt_constant_folding(nir);
579 } else {
580 ralloc_free(system_values);
581 system_values = NULL;
582 }
583
584 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
585 nir_validate_shader(nir, "after remap");
586
587    /* We don't use params[] but gallium leaves num_uniforms set.  We use it
588     * to detect when cbuf0 exists but is no longer needed by the time we get
589     * here.  Zero it out so that the back-end doesn't get confused when
590     * 4 * nr_params != num_uniforms.
591 */
592 nir->num_uniforms = 0;
593
594 /* Constant loads (if any) need to go at the end of the constant buffers so
595 * we need to know num_cbufs before we can lower to them.
596 */
597 if (temp_const_ubo_name != NULL) {
598 nir_load_const_instr *const_ubo_index =
599 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
600 assert(const_ubo_index->def.bit_size == 32);
601 const_ubo_index->value[0].u32 = num_cbufs;
602 }
603
604 *out_system_values = system_values;
605 *out_num_system_values = num_system_values;
606 *out_num_cbufs = num_cbufs;
607 }
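/* End-to-end sketch of the pass above (hypothetical first-seen sysval):
 * a nir_intrinsic_load_user_clip_plane for plane 0 becomes a load_ubo
 * from the trailing system-value constant buffer at offset 0, and
 * system_values[0..3] are set to BRW_PARAM_BUILTIN_CLIP_PLANE(0, 0..3);
 * the state code presumably uploads the live clip-plane values into that
 * extra cbuf whenever sysvals_need_upload is flagged.
 */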
608
609 static const char *surface_group_names[] = {
610 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
611 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
612 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
613 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
614 [IRIS_SURFACE_GROUP_UBO] = "ubo",
615 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
616 [IRIS_SURFACE_GROUP_IMAGE] = "image",
617 };
618
619 static void
620 iris_print_binding_table(FILE *fp, const char *name,
621 const struct iris_binding_table *bt)
622 {
623 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
624
625 uint32_t total = 0;
626 uint32_t compacted = 0;
627
628 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
629 uint32_t size = bt->sizes[i];
630 total += size;
631 if (size)
632 compacted += util_bitcount64(bt->used_mask[i]);
633 }
634
635 if (total == 0) {
636 fprintf(fp, "Binding table for %s is empty\n\n", name);
637 return;
638 }
639
640 if (total != compacted) {
641 fprintf(fp, "Binding table for %s "
642 "(compacted to %u entries from %u entries)\n",
643 name, compacted, total);
644 } else {
645 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
646 }
647
648 uint32_t entry = 0;
649 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
650 uint64_t mask = bt->used_mask[i];
651 while (mask) {
652 int index = u_bit_scan64(&mask);
653 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
654 }
655 }
656 fprintf(fp, "\n");
657 }
658
659 enum {
660 /* Max elements in a surface group. */
661 SURFACE_GROUP_MAX_ELEMENTS = 64,
662 };
663
664 /**
665 * Map a <group, index> pair to a binding table index.
666 *
667 * For example: <UBO, 5> => binding table index 12
668 */
669 uint32_t
670 iris_group_index_to_bti(const struct iris_binding_table *bt,
671 enum iris_surface_group group, uint32_t index)
672 {
673 assert(index < bt->sizes[group]);
674 uint64_t mask = bt->used_mask[group];
675 uint64_t bit = 1ull << index;
676 if (bit & mask) {
677 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
678 } else {
679 return IRIS_SURFACE_NOT_USED;
680 }
681 }
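/* Worked example (hypothetical mask): with used_mask[UBO] = 0b101101 and
 * offsets[UBO] = 7, group index 5 maps to
 * 7 + util_bitcount64(0b011111 & 0b101101) = 7 + 3 = 10, while the unused
 * group index 1 yields IRIS_SURFACE_NOT_USED.
 */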
682
683 /**
684 * Map a binding table index back to a <group, index> pair.
685 *
686 * For example: binding table index 12 => <UBO, 5>
687 */
688 uint32_t
689 iris_bti_to_group_index(const struct iris_binding_table *bt,
690 enum iris_surface_group group, uint32_t bti)
691 {
692 uint64_t used_mask = bt->used_mask[group];
693 assert(bti >= bt->offsets[group]);
694
695 uint32_t c = bti - bt->offsets[group];
696 while (used_mask) {
697 int i = u_bit_scan64(&used_mask);
698 if (c == 0)
699 return i;
700 c--;
701 }
702
703 return IRIS_SURFACE_NOT_USED;
704 }
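/* Continuing the hypothetical example above: BTI 10 with offsets[UBO] = 7
 * gives c = 3, and scanning used_mask = 0b101101 skips bits 0, 2 and 3
 * before returning group index 5.
 */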
705
706 static void
707 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
708 nir_instr *instr, nir_src *src,
709 enum iris_surface_group group)
710 {
711 assert(bt->sizes[group] > 0);
712
713 b->cursor = nir_before_instr(instr);
714 nir_ssa_def *bti;
715 if (nir_src_is_const(*src)) {
716 uint32_t index = nir_src_as_uint(*src);
717 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
718 src->ssa->bit_size);
719 } else {
720       /* Indirect usage makes all the surfaces of the group available,
721 * so we can just add the base.
722 */
723 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
724 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
725 }
726 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
727 }
728
729 static void
730 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
731 enum iris_surface_group group)
732 {
733 assert(bt->sizes[group] > 0);
734
735 if (nir_src_is_const(*src)) {
736 uint64_t index = nir_src_as_uint(*src);
737 assert(index < bt->sizes[group]);
738 bt->used_mask[group] |= 1ull << index;
739 } else {
740 /* There's an indirect usage, we need all the surfaces. */
741 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
742 }
743 }
744
745 static bool
746 skip_compacting_binding_tables(void)
747 {
748 static int skip = -1;
749 if (skip < 0)
750 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
751 return skip;
752 }
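/* Usage note (debug aid): setting INTEL_DISABLE_COMPACT_BINDING_TABLE=1
 * in the environment makes every surface group fully "used", which keeps
 * binding table indices stable and can help when bisecting
 * compaction-related bugs.
 */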
753
754 /**
755 * Set up the binding table indices and apply to the shader.
756 */
757 static void
758 iris_setup_binding_table(const struct gen_device_info *devinfo,
759 struct nir_shader *nir,
760 struct iris_binding_table *bt,
761 unsigned num_render_targets,
762 unsigned num_system_values,
763 unsigned num_cbufs)
764 {
765 const struct shader_info *info = &nir->info;
766
767 memset(bt, 0, sizeof(*bt));
768
769 /* Set the sizes for each surface group. For some groups, we already know
770 * upfront how many will be used, so mark them.
771 */
772 if (info->stage == MESA_SHADER_FRAGMENT) {
773 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
774 /* All render targets used. */
775 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
776 BITFIELD64_MASK(num_render_targets);
777
778       /* Set up the render target read surface group in order to support
779        * non-coherent framebuffer fetch on Gen8.
780 */
781 if (devinfo->gen == 8 && info->outputs_read) {
782 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
783 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
784 BITFIELD64_MASK(num_render_targets);
785 }
786 } else if (info->stage == MESA_SHADER_COMPUTE) {
787 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
788 }
789
790 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
791 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
792
793 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
794
795 /* Allocate an extra slot in the UBO section for NIR constants.
796 * Binding table compaction will remove it if unnecessary.
797 *
798 * We don't include them in iris_compiled_shader::num_cbufs because
799 * they are uploaded separately from shs->constbuf[], but from a shader
800 * point of view, they're another UBO (at the end of the section).
801 */
802 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
803
804 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
805
806 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
807 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
808
809    /* Mark surfaces used for the cases where we don't have the information
810     * available upfront.
811 */
812 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
813 nir_foreach_block (block, impl) {
814 nir_foreach_instr (instr, block) {
815 if (instr->type != nir_instr_type_intrinsic)
816 continue;
817
818 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
819 switch (intrin->intrinsic) {
820 case nir_intrinsic_load_num_work_groups:
821 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
822 break;
823
824 case nir_intrinsic_load_output:
825 if (devinfo->gen == 8) {
826 mark_used_with_src(bt, &intrin->src[0],
827 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
828 }
829 break;
830
831 case nir_intrinsic_image_size:
832 case nir_intrinsic_image_load:
833 case nir_intrinsic_image_store:
834 case nir_intrinsic_image_atomic_add:
835 case nir_intrinsic_image_atomic_imin:
836 case nir_intrinsic_image_atomic_umin:
837 case nir_intrinsic_image_atomic_imax:
838 case nir_intrinsic_image_atomic_umax:
839 case nir_intrinsic_image_atomic_and:
840 case nir_intrinsic_image_atomic_or:
841 case nir_intrinsic_image_atomic_xor:
842 case nir_intrinsic_image_atomic_exchange:
843 case nir_intrinsic_image_atomic_comp_swap:
844 case nir_intrinsic_image_load_raw_intel:
845 case nir_intrinsic_image_store_raw_intel:
846 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
847 break;
848
849 case nir_intrinsic_load_ubo:
850 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
851 break;
852
853 case nir_intrinsic_store_ssbo:
854 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
855 break;
856
857 case nir_intrinsic_get_buffer_size:
858 case nir_intrinsic_ssbo_atomic_add:
859 case nir_intrinsic_ssbo_atomic_imin:
860 case nir_intrinsic_ssbo_atomic_umin:
861 case nir_intrinsic_ssbo_atomic_imax:
862 case nir_intrinsic_ssbo_atomic_umax:
863 case nir_intrinsic_ssbo_atomic_and:
864 case nir_intrinsic_ssbo_atomic_or:
865 case nir_intrinsic_ssbo_atomic_xor:
866 case nir_intrinsic_ssbo_atomic_exchange:
867 case nir_intrinsic_ssbo_atomic_comp_swap:
868 case nir_intrinsic_ssbo_atomic_fmin:
869 case nir_intrinsic_ssbo_atomic_fmax:
870 case nir_intrinsic_ssbo_atomic_fcomp_swap:
871 case nir_intrinsic_load_ssbo:
872 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
873 break;
874
875 default:
876 break;
877 }
878 }
879 }
880
881    /* When disabled, we just mark everything as used. */
882 if (unlikely(skip_compacting_binding_tables())) {
883 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
884 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
885 }
886
887 /* Calculate the offsets and the binding table size based on the used
888 * surfaces. After this point, the functions to go between "group indices"
889 * and binding table indices can be used.
890 */
891 uint32_t next = 0;
892 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
893 if (bt->used_mask[i] != 0) {
894 bt->offsets[i] = next;
895 next += util_bitcount64(bt->used_mask[i]);
896 }
897 }
898 bt->size_bytes = next * 4;
899
900 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
901 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
902 }
903
904 /* Apply the binding table indices. The backend compiler is not expected
905 * to change those, as we haven't set any of the *_start entries in brw
906 * binding_table.
907 */
908 nir_builder b;
909 nir_builder_init(&b, impl);
910
911 nir_foreach_block (block, impl) {
912 nir_foreach_instr (instr, block) {
913 if (instr->type == nir_instr_type_tex) {
914 nir_tex_instr *tex = nir_instr_as_tex(instr);
915 tex->texture_index =
916 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
917 tex->texture_index);
918 continue;
919 }
920
921 if (instr->type != nir_instr_type_intrinsic)
922 continue;
923
924 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
925 switch (intrin->intrinsic) {
926 case nir_intrinsic_image_size:
927 case nir_intrinsic_image_load:
928 case nir_intrinsic_image_store:
929 case nir_intrinsic_image_atomic_add:
930 case nir_intrinsic_image_atomic_imin:
931 case nir_intrinsic_image_atomic_umin:
932 case nir_intrinsic_image_atomic_imax:
933 case nir_intrinsic_image_atomic_umax:
934 case nir_intrinsic_image_atomic_and:
935 case nir_intrinsic_image_atomic_or:
936 case nir_intrinsic_image_atomic_xor:
937 case nir_intrinsic_image_atomic_exchange:
938 case nir_intrinsic_image_atomic_comp_swap:
939 case nir_intrinsic_image_load_raw_intel:
940 case nir_intrinsic_image_store_raw_intel:
941 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
942 IRIS_SURFACE_GROUP_IMAGE);
943 break;
944
945 case nir_intrinsic_load_ubo:
946 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
947 IRIS_SURFACE_GROUP_UBO);
948 break;
949
950 case nir_intrinsic_store_ssbo:
951 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
952 IRIS_SURFACE_GROUP_SSBO);
953 break;
954
955 case nir_intrinsic_load_output:
956 if (devinfo->gen == 8) {
957 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
958 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
959 }
960 break;
961
962 case nir_intrinsic_get_buffer_size:
963 case nir_intrinsic_ssbo_atomic_add:
964 case nir_intrinsic_ssbo_atomic_imin:
965 case nir_intrinsic_ssbo_atomic_umin:
966 case nir_intrinsic_ssbo_atomic_imax:
967 case nir_intrinsic_ssbo_atomic_umax:
968 case nir_intrinsic_ssbo_atomic_and:
969 case nir_intrinsic_ssbo_atomic_or:
970 case nir_intrinsic_ssbo_atomic_xor:
971 case nir_intrinsic_ssbo_atomic_exchange:
972 case nir_intrinsic_ssbo_atomic_comp_swap:
973 case nir_intrinsic_ssbo_atomic_fmin:
974 case nir_intrinsic_ssbo_atomic_fmax:
975 case nir_intrinsic_ssbo_atomic_fcomp_swap:
976 case nir_intrinsic_load_ssbo:
977 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
978 IRIS_SURFACE_GROUP_SSBO);
979 break;
980
981 default:
982 break;
983 }
984 }
985 }
986 }
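/* Hypothetical layout produced by the code above: a fragment shader with
 * two render targets, two textures and a single constant buffer (with the
 * extra NIR-constants UBO slot unused and compacted away) ends up with
 * BTIs 0-1 for the render targets, 2-3 for the textures, 4 for the UBO,
 * and bt->size_bytes = 5 * 4 = 20.
 */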
987
988 static void
989 iris_debug_recompile(struct iris_context *ice,
990 struct shader_info *info,
991 const struct brw_base_prog_key *key)
992 {
993 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
994 const struct gen_device_info *devinfo = &screen->devinfo;
995 const struct brw_compiler *c = screen->compiler;
996
997 if (!info)
998 return;
999
1000 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1001 _mesa_shader_stage_to_string(info->stage),
1002 info->name ? info->name : "(no identifier)",
1003 info->label ? info->label : "");
1004
1005 const void *old_iris_key =
1006 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1007
1008 union brw_any_prog_key old_key;
1009
1010 switch (info->stage) {
1011 case MESA_SHADER_VERTEX:
1012 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1013 break;
1014 case MESA_SHADER_TESS_CTRL:
1015 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1016 break;
1017 case MESA_SHADER_TESS_EVAL:
1018 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1019 break;
1020 case MESA_SHADER_GEOMETRY:
1021 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1022 break;
1023 case MESA_SHADER_FRAGMENT:
1024 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1025 break;
1026 case MESA_SHADER_COMPUTE:
1027 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1028 break;
1029 default:
1030 unreachable("invalid shader stage");
1031 }
1032
1033 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1034 }
1035
1036 /**
1037 * Get the shader for the last enabled geometry stage.
1038 *
1039 * This stage is the one which will feed stream output and the rasterizer.
1040 */
1041 static gl_shader_stage
1042 last_vue_stage(struct iris_context *ice)
1043 {
1044 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1045 return MESA_SHADER_GEOMETRY;
1046
1047 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1048 return MESA_SHADER_TESS_EVAL;
1049
1050 return MESA_SHADER_VERTEX;
1051 }
1052
1053 /**
1054 * Compile a vertex shader, and upload the assembly.
1055 */
1056 static struct iris_compiled_shader *
1057 iris_compile_vs(struct iris_context *ice,
1058 struct iris_uncompiled_shader *ish,
1059 const struct iris_vs_prog_key *key)
1060 {
1061 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1062 const struct brw_compiler *compiler = screen->compiler;
1063 const struct gen_device_info *devinfo = &screen->devinfo;
1064 void *mem_ctx = ralloc_context(NULL);
1065 struct brw_vs_prog_data *vs_prog_data =
1066 rzalloc(mem_ctx, struct brw_vs_prog_data);
1067 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1068 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1069 enum brw_param_builtin *system_values;
1070 unsigned num_system_values;
1071 unsigned num_cbufs;
1072
1073 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1074
1075 if (key->vue.nr_userclip_plane_consts) {
1076 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1077 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1078 true, false, NULL);
1079 nir_lower_io_to_temporaries(nir, impl, true, false);
1080 nir_lower_global_vars_to_local(nir);
1081 nir_lower_vars_to_ssa(nir);
1082 nir_shader_gather_info(nir, impl);
1083 }
1084
1085 prog_data->use_alt_mode = ish->use_alt_mode;
1086
1087 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1088 &num_system_values, &num_cbufs);
1089
1090 struct iris_binding_table bt;
1091 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1092 num_system_values, num_cbufs);
1093
1094 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1095
1096 brw_compute_vue_map(devinfo,
1097 &vue_prog_data->vue_map, nir->info.outputs_written,
1098 nir->info.separate_shader);
1099
1100 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1101
1102 char *error_str = NULL;
1103 const unsigned *program =
1104 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1105 nir, -1, NULL, &error_str);
1106 if (program == NULL) {
1107 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1108 ralloc_free(mem_ctx);
1109       return NULL;
1110 }
1111
1112 if (ish->compiled_once) {
1113 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1114 } else {
1115 ish->compiled_once = true;
1116 }
1117
1118 uint32_t *so_decls =
1119 ice->vtbl.create_so_decl_list(&ish->stream_output,
1120 &vue_prog_data->vue_map);
1121
1122 struct iris_compiled_shader *shader =
1123 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1124 prog_data, so_decls, system_values, num_system_values,
1125 num_cbufs, &bt);
1126
1127 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1128
1129 ralloc_free(mem_ctx);
1130 return shader;
1131 }
1132
1133 /**
1134 * Update the current vertex shader variant.
1135 *
1136 * Fill out the key, look in the cache, compile and bind if needed.
1137 */
1138 static void
1139 iris_update_compiled_vs(struct iris_context *ice)
1140 {
1141 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1142 struct iris_uncompiled_shader *ish =
1143 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1144
1145 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1146 ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1147
1148 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1149 struct iris_compiled_shader *shader =
1150 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1151
1152 if (!shader)
1153 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1154
1155 if (!shader)
1156 shader = iris_compile_vs(ice, ish, &key);
1157
1158 if (old != shader) {
1159 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1160 ice->state.dirty |= IRIS_DIRTY_VS |
1161 IRIS_DIRTY_BINDINGS_VS |
1162 IRIS_DIRTY_CONSTANTS_VS |
1163 IRIS_DIRTY_VF_SGVS;
1164 shs->sysvals_need_upload = true;
1165
1166 const struct brw_vs_prog_data *vs_prog_data =
1167 (void *) shader->prog_data;
1168 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1169 vs_prog_data->uses_baseinstance;
1170 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1171 vs_prog_data->uses_is_indexed_draw;
1172 const bool needs_sgvs_element = uses_draw_params ||
1173 vs_prog_data->uses_instanceid ||
1174 vs_prog_data->uses_vertexid;
1175
1176 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1177 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1178 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1179 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1180 IRIS_DIRTY_VERTEX_ELEMENTS;
1181 }
1182 ice->state.vs_uses_draw_params = uses_draw_params;
1183 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1184 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1185 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1186 }
1187 }
1188
1189 /**
1190 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1191 */
1192 const struct shader_info *
1193 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1194 {
1195 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1196
1197 if (!ish)
1198 return NULL;
1199
1200 const nir_shader *nir = ish->nir;
1201 return &nir->info;
1202 }
1203
1204 /**
1205 * Get the union of TCS output and TES input slots.
1206 *
1207 * TCS and TES need to agree on a common URB entry layout. In particular,
1208 * the data for all patch vertices is stored in a single URB entry (unlike
1209 * GS which has one entry per input vertex). This means that per-vertex
1210 * array indexing needs a stride.
1211 *
1212 * SSO requires locations to match, but doesn't require the number of
1213 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1214 * So, we need to take the extra step of unifying these on the fly.
1215 */
1216 static void
1217 get_unified_tess_slots(const struct iris_context *ice,
1218 uint64_t *per_vertex_slots,
1219 uint32_t *per_patch_slots)
1220 {
1221 const struct shader_info *tcs =
1222 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1223 const struct shader_info *tes =
1224 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1225
1226 *per_vertex_slots = tes->inputs_read;
1227 *per_patch_slots = tes->patch_inputs_read;
1228
1229 if (tcs) {
1230 *per_vertex_slots |= tcs->outputs_written;
1231 *per_patch_slots |= tcs->patch_outputs_written;
1232 }
1233 }
1234
1235 /**
1236 * Compile a tessellation control shader, and upload the assembly.
1237 */
1238 static struct iris_compiled_shader *
1239 iris_compile_tcs(struct iris_context *ice,
1240 struct iris_uncompiled_shader *ish,
1241 const struct iris_tcs_prog_key *key)
1242 {
1243 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1244 const struct brw_compiler *compiler = screen->compiler;
1245 const struct nir_shader_compiler_options *options =
1246 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1247 void *mem_ctx = ralloc_context(NULL);
1248 struct brw_tcs_prog_data *tcs_prog_data =
1249 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1250 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1251 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1252 const struct gen_device_info *devinfo = &screen->devinfo;
1253 enum brw_param_builtin *system_values = NULL;
1254 unsigned num_system_values = 0;
1255 unsigned num_cbufs = 0;
1256
1257 nir_shader *nir;
1258
1259 struct iris_binding_table bt;
1260
1261 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1262
1263 if (ish) {
1264 nir = nir_shader_clone(mem_ctx, ish->nir);
1265
1266 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1267 &num_system_values, &num_cbufs);
1268 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1269 num_system_values, num_cbufs);
1270 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1271 } else {
1272 nir =
1273 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1274
1275 /* Reserve space for passing the default tess levels as constants. */
1276 num_cbufs = 1;
1277 num_system_values = 8;
1278 system_values =
1279 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1280 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1281 prog_data->nr_params = num_system_values;
1282
1283 if (key->tes_primitive_mode == GL_QUADS) {
1284 for (int i = 0; i < 4; i++)
1285 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1286
1287 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1288 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1289 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1290 for (int i = 0; i < 3; i++)
1291 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1292
1293 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1294 } else {
1295 assert(key->tes_primitive_mode == GL_ISOLINES);
1296 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1297 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1298 }
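      /* Illustration (worked through, not in the original source): with
       * GL_QUADS the loop above leaves system_values[7]..[4] holding
       * TESS_LEVEL_OUTER_X..W and system_values[3]/[2] holding
       * TESS_LEVEL_INNER_X/Y, so the default outer levels occupy the last
       * vec4 of this 8-dword buffer with the inner pair just below them.
       */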
1299
1300 /* Manually setup the TCS binding table. */
1301 memset(&bt, 0, sizeof(bt));
1302 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1303 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1304 bt.size_bytes = 4;
1305
1306 prog_data->ubo_ranges[0].length = 1;
1307 }
1308
1309 char *error_str = NULL;
1310 const unsigned *program =
1311 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1312 nir, -1, NULL, &error_str);
1313 if (program == NULL) {
1314 dbg_printf("Failed to compile control shader: %s\n", error_str);
1315 ralloc_free(mem_ctx);
1316       return NULL;
1317 }
1318
1319 if (ish) {
1320 if (ish->compiled_once) {
1321 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1322 } else {
1323 ish->compiled_once = true;
1324 }
1325 }
1326
1327 struct iris_compiled_shader *shader =
1328 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1329 prog_data, NULL, system_values, num_system_values,
1330 num_cbufs, &bt);
1331
1332 if (ish)
1333 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1334
1335 ralloc_free(mem_ctx);
1336 return shader;
1337 }
1338
1339 /**
1340 * Update the current tessellation control shader variant.
1341 *
1342 * Fill out the key, look in the cache, compile and bind if needed.
1343 */
1344 static void
1345 iris_update_compiled_tcs(struct iris_context *ice)
1346 {
1347 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1348 struct iris_uncompiled_shader *tcs =
1349 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1350 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1351 const struct brw_compiler *compiler = screen->compiler;
1352 const struct gen_device_info *devinfo = &screen->devinfo;
1353
1354 const struct shader_info *tes_info =
1355 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1356 struct iris_tcs_prog_key key = {
1357 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1358 .tes_primitive_mode = tes_info->tess.primitive_mode,
1359 .input_vertices =
1360 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1361 .quads_workaround = devinfo->gen < 9 &&
1362 tes_info->tess.primitive_mode == GL_QUADS &&
1363 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1364 };
1365 get_unified_tess_slots(ice, &key.outputs_written,
1366 &key.patch_outputs_written);
1367 ice->vtbl.populate_tcs_key(ice, &key);
1368
1369 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1370 struct iris_compiled_shader *shader =
1371 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1372
1373 if (tcs && !shader)
1374 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1375
1376 if (!shader)
1377 shader = iris_compile_tcs(ice, tcs, &key);
1378
1379 if (old != shader) {
1380 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1381 ice->state.dirty |= IRIS_DIRTY_TCS |
1382 IRIS_DIRTY_BINDINGS_TCS |
1383 IRIS_DIRTY_CONSTANTS_TCS;
1384 shs->sysvals_need_upload = true;
1385 }
1386 }
1387
1388 /**
1389 * Compile a tessellation evaluation shader, and upload the assembly.
1390 */
1391 static struct iris_compiled_shader *
1392 iris_compile_tes(struct iris_context *ice,
1393 struct iris_uncompiled_shader *ish,
1394 const struct iris_tes_prog_key *key)
1395 {
1396 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1397 const struct brw_compiler *compiler = screen->compiler;
1398 void *mem_ctx = ralloc_context(NULL);
1399 struct brw_tes_prog_data *tes_prog_data =
1400 rzalloc(mem_ctx, struct brw_tes_prog_data);
1401 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1402 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1403 enum brw_param_builtin *system_values;
1404 const struct gen_device_info *devinfo = &screen->devinfo;
1405 unsigned num_system_values;
1406 unsigned num_cbufs;
1407
1408 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1409
1410 if (key->vue.nr_userclip_plane_consts) {
1411 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1412 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1413 true, false, NULL);
1414 nir_lower_io_to_temporaries(nir, impl, true, false);
1415 nir_lower_global_vars_to_local(nir);
1416 nir_lower_vars_to_ssa(nir);
1417 nir_shader_gather_info(nir, impl);
1418 }
1419
1420 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1421 &num_system_values, &num_cbufs);
1422
1423 struct iris_binding_table bt;
1424 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1425 num_system_values, num_cbufs);
1426
1427 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1428
1429 struct brw_vue_map input_vue_map;
1430 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1431 key->patch_inputs_read);
1432
1433 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1434
1435 char *error_str = NULL;
1436 const unsigned *program =
1437 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1438 tes_prog_data, nir, -1, NULL, &error_str);
1439 if (program == NULL) {
1440 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1441 ralloc_free(mem_ctx);
1442       return NULL;
1443 }
1444
1445 if (ish->compiled_once) {
1446 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1447 } else {
1448 ish->compiled_once = true;
1449 }
1450
1451 uint32_t *so_decls =
1452 ice->vtbl.create_so_decl_list(&ish->stream_output,
1453 &vue_prog_data->vue_map);
1454
1455
1456 struct iris_compiled_shader *shader =
1457 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1458 prog_data, so_decls, system_values, num_system_values,
1459 num_cbufs, &bt);
1460
1461 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1462
1463 ralloc_free(mem_ctx);
1464 return shader;
1465 }
1466
1467 /**
1468 * Update the current tessellation evaluation shader variant.
1469 *
1470 * Fill out the key, look in the cache, compile and bind if needed.
1471 */
1472 static void
1473 iris_update_compiled_tes(struct iris_context *ice)
1474 {
1475 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1476 struct iris_uncompiled_shader *ish =
1477 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1478
1479 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1480 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1481 ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1482
1483 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1484 struct iris_compiled_shader *shader =
1485 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1486
1487 if (!shader)
1488 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1489
1490 if (!shader)
1491 shader = iris_compile_tes(ice, ish, &key);
1492
1493 if (old != shader) {
1494 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1495 ice->state.dirty |= IRIS_DIRTY_TES |
1496 IRIS_DIRTY_BINDINGS_TES |
1497 IRIS_DIRTY_CONSTANTS_TES;
1498 shs->sysvals_need_upload = true;
1499 }
1500
1501 /* TODO: Could compare and avoid flagging this. */
1502 const struct shader_info *tes_info = &ish->nir->info;
1503 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1504 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
1505 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1506 }
1507 }
1508
1509 /**
1510 * Compile a geometry shader, and upload the assembly.
1511 */
1512 static struct iris_compiled_shader *
1513 iris_compile_gs(struct iris_context *ice,
1514 struct iris_uncompiled_shader *ish,
1515 const struct iris_gs_prog_key *key)
1516 {
1517 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1518 const struct brw_compiler *compiler = screen->compiler;
1519 const struct gen_device_info *devinfo = &screen->devinfo;
1520 void *mem_ctx = ralloc_context(NULL);
1521 struct brw_gs_prog_data *gs_prog_data =
1522 rzalloc(mem_ctx, struct brw_gs_prog_data);
1523 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1524 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1525 enum brw_param_builtin *system_values;
1526 unsigned num_system_values;
1527 unsigned num_cbufs;
1528
1529 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1530
1531 if (key->vue.nr_userclip_plane_consts) {
1532 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1533 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1534 false, NULL);
1535 nir_lower_io_to_temporaries(nir, impl, true, false);
1536 nir_lower_global_vars_to_local(nir);
1537 nir_lower_vars_to_ssa(nir);
1538 nir_shader_gather_info(nir, impl);
1539 }
1540
1541 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1542 &num_system_values, &num_cbufs);
1543
1544 struct iris_binding_table bt;
1545 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1546 num_system_values, num_cbufs);
1547
1548 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1549
1550 brw_compute_vue_map(devinfo,
1551 &vue_prog_data->vue_map, nir->info.outputs_written,
1552 nir->info.separate_shader);
1553
1554 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1555
1556 char *error_str = NULL;
1557 const unsigned *program =
1558 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1559 nir, NULL, -1, NULL, &error_str);
1560 if (program == NULL) {
1561 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1562 ralloc_free(mem_ctx);
1563       return NULL;
1564 }
1565
1566 if (ish->compiled_once) {
1567 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1568 } else {
1569 ish->compiled_once = true;
1570 }
1571
1572 uint32_t *so_decls =
1573 ice->vtbl.create_so_decl_list(&ish->stream_output,
1574 &vue_prog_data->vue_map);
1575
1576 struct iris_compiled_shader *shader =
1577 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1578 prog_data, so_decls, system_values, num_system_values,
1579 num_cbufs, &bt);
1580
1581 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1582
1583 ralloc_free(mem_ctx);
1584 return shader;
1585 }
1586
1587 /**
1588 * Update the current geometry shader variant.
1589 *
1590 * Fill out the key, look in the cache, compile and bind if needed.
1591 */
1592 static void
1593 iris_update_compiled_gs(struct iris_context *ice)
1594 {
1595 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1596 struct iris_uncompiled_shader *ish =
1597 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1598 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1599 struct iris_compiled_shader *shader = NULL;
1600
1601 if (ish) {
1602 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1603 ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1604
1605 shader =
1606 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1607
1608 if (!shader)
1609 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1610
1611 if (!shader)
1612 shader = iris_compile_gs(ice, ish, &key);
1613 }
1614
1615 if (old != shader) {
1616 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1617 ice->state.dirty |= IRIS_DIRTY_GS |
1618 IRIS_DIRTY_BINDINGS_GS |
1619 IRIS_DIRTY_CONSTANTS_GS;
1620 shs->sysvals_need_upload = true;
1621 }
1622 }
1623
1624 /**
1625 * Compile a fragment (pixel) shader, and upload the assembly.
1626 */
1627 static struct iris_compiled_shader *
1628 iris_compile_fs(struct iris_context *ice,
1629 struct iris_uncompiled_shader *ish,
1630 const struct iris_fs_prog_key *key,
1631 struct brw_vue_map *vue_map)
1632 {
1633 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1634 const struct brw_compiler *compiler = screen->compiler;
1635 void *mem_ctx = ralloc_context(NULL);
1636 struct brw_wm_prog_data *fs_prog_data =
1637 rzalloc(mem_ctx, struct brw_wm_prog_data);
1638 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1639 enum brw_param_builtin *system_values;
1640 const struct gen_device_info *devinfo = &screen->devinfo;
1641 unsigned num_system_values;
1642 unsigned num_cbufs;
1643
1644 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1645
1646 prog_data->use_alt_mode = ish->use_alt_mode;
1647
1648 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1649 &num_system_values, &num_cbufs);
1650
1651 /* Lower output variables to load_output intrinsics before setting up
1652 * binding tables, so iris_setup_binding_table can map any load_output
1653 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1654 * non-coherent framebuffer fetches.
1655 */
1656 brw_nir_lower_fs_outputs(nir);
1657
1658 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1659 * and do not need a binding table entry with a null surface. Earlier
1660 * generations need an entry for a null surface.
1661 */
1662 int null_rts = devinfo->gen < 11 ? 1 : 0;
1663
1664 struct iris_binding_table bt;
1665 iris_setup_binding_table(devinfo, nir, &bt,
1666 MAX2(key->nr_color_regions, null_rts),
1667 num_system_values, num_cbufs);
1668
1669 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1670
1671 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1672
1673 char *error_str = NULL;
1674 const unsigned *program =
1675 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1676 nir, -1, -1, -1, true, false, vue_map,
1677 NULL, &error_str);
1678 if (program == NULL) {
1679 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1680 ralloc_free(mem_ctx);
1681 return false;
1682 }
1683
1684 if (ish->compiled_once) {
1685 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1686 } else {
1687 ish->compiled_once = true;
1688 }
1689
1690 struct iris_compiled_shader *shader =
1691 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1692 prog_data, NULL, system_values, num_system_values,
1693 num_cbufs, &bt);
1694
1695 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1696
1697 ralloc_free(mem_ctx);
1698 return shader;
1699 }
1700
1701 /**
1702 * Update the current fragment shader variant.
1703 *
1704 * Fill out the key, look in the cache, compile and bind if needed.
1705 */
1706 static void
1707 iris_update_compiled_fs(struct iris_context *ice)
1708 {
1709 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1710 struct iris_uncompiled_shader *ish =
1711 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1712 struct iris_fs_prog_key key = { KEY_ID(base) };
1713 ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1714
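/* Fragment shaders with more than 16 varying inputs depend on the last
 * VUE map (see iris_create_fs_state), so fold the currently valid slots
 * into the key.
 */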
1715 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1716 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1717
1718 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1719 struct iris_compiled_shader *shader =
1720 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1721
1722 if (!shader)
1723 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1724
1725 if (!shader)
1726 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1727
1728 if (old != shader) {
1729 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1730 // toggles. might be able to avoid flagging SBE too.
1731 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1732 ice->state.dirty |= IRIS_DIRTY_FS |
1733 IRIS_DIRTY_BINDINGS_FS |
1734 IRIS_DIRTY_CONSTANTS_FS |
1735 IRIS_DIRTY_WM |
1736 IRIS_DIRTY_CLIP |
1737 IRIS_DIRTY_SBE;
1738 shs->sysvals_need_upload = true;
1739 }
1740 }
1741
1742 /**
1743 * Update the last enabled stage's VUE map.
1744 *
1745 * When the output interface of the shader stage feeding the rasterizer
1746 * changes, we need to re-emit various packets.
1747 */
1748 static void
1749 update_last_vue_map(struct iris_context *ice,
1750 struct brw_stage_prog_data *prog_data)
1751 {
1752 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1753 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1754 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1755 const uint64_t changed_slots =
1756 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1757
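/* If the last stage started or stopped writing the viewport index, the
 * effective viewport count changes; re-emit the viewport, scissor, and
 * clip packets, plus anything that depends on the last VUE map.
 */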
1758 if (changed_slots & VARYING_BIT_VIEWPORT) {
1759 ice->state.num_viewports =
1760 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1761 ice->state.dirty |= IRIS_DIRTY_CLIP |
1762 IRIS_DIRTY_SF_CL_VIEWPORT |
1763 IRIS_DIRTY_CC_VIEWPORT |
1764 IRIS_DIRTY_SCISSOR_RECT |
1765 IRIS_DIRTY_UNCOMPILED_FS |
1766 ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1767 }
1768
1769 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1770 ice->state.dirty |= IRIS_DIRTY_SBE;
1771 }
1772
1773 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1774 }
1775
1776 static void
1777 iris_update_pull_constant_descriptors(struct iris_context *ice,
1778 gl_shader_stage stage)
1779 {
1780 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1781
1782 if (!shader || !shader->prog_data->has_ubo_pull)
1783 return;
1784
1785 struct iris_shader_state *shs = &ice->state.shaders[stage];
1786 bool any_new_descriptors =
1787 shader->num_system_values > 0 && shs->sysvals_need_upload;
1788
1789 unsigned bound_cbufs = shs->bound_cbufs;
1790
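/* Make sure each bound constant buffer has a surface state for pull
 * constant loads; creating one means the binding table contents changed.
 */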
1791 while (bound_cbufs) {
1792 const int i = u_bit_scan(&bound_cbufs);
1793 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1794 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1795 if (!surf_state->res && cbuf->buffer) {
1796 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1797 any_new_descriptors = true;
1798 }
1799 }
1800
1801 if (any_new_descriptors)
1802 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1803 }
1804
1805 /**
1806 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1807 */
1808 static struct brw_vue_prog_data *
1809 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1810 {
1811 if (!ice->shaders.prog[stage])
1812 return NULL;
1813
1814 return (void *) ice->shaders.prog[stage]->prog_data;
1815 }
1816
1817 // XXX: iris_compiled_shaders are space-leaking :(
1818 // XXX: do remember to unbind them if deleting them.
1819
1820 /**
1821 * Update the current shader variants for the given state.
1822 *
1823 * This should be called on every draw call to ensure that the correct
1824 * shaders are bound. It will also flag any dirty state triggered by
1825 * swapping out those shaders.
1826 */
1827 void
1828 iris_update_compiled_shaders(struct iris_context *ice)
1829 {
1830 const uint64_t dirty = ice->state.dirty;
1831
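/* Snapshot the current VUE stage prog_data pointers so we can detect URB
 * layout changes after the shader variants are updated (only needed when
 * the URB isn't already flagged dirty).
 */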
1832 struct brw_vue_prog_data *old_prog_datas[4];
1833 if (!(dirty & IRIS_DIRTY_URB)) {
1834 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1835 old_prog_datas[i] = get_vue_prog_data(ice, i);
1836 }
1837
1838 if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
1839 struct iris_uncompiled_shader *tes =
1840 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1841 if (tes) {
1842 iris_update_compiled_tcs(ice);
1843 iris_update_compiled_tes(ice);
1844 } else {
1845 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1846 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1847 ice->state.dirty |=
1848 IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
1849 IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
1850 IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
1851 }
1852 }
1853
1854 if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
1855 iris_update_compiled_vs(ice);
1856 if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
1857 iris_update_compiled_gs(ice);
1858
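/* If a GS or TES is present, note whether it outputs points or lines;
 * a change requires reprogramming the clip state (IRIS_DIRTY_CLIP).
 */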
1859 if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
1860 const struct iris_compiled_shader *gs =
1861 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1862 const struct iris_compiled_shader *tes =
1863 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1864
1865 bool points_or_lines = false;
1866
1867 if (gs) {
1868 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1869 points_or_lines =
1870 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1871 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1872 } else if (tes) {
1873 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1874 points_or_lines =
1875 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1876 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1877 }
1878
1879 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1880 /* Outbound to XY Clip enables */
1881 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1882 ice->state.dirty |= IRIS_DIRTY_CLIP;
1883 }
1884 }
1885
1886 gl_shader_stage last_stage = last_vue_stage(ice);
1887 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1888 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1889 update_last_vue_map(ice, shader->prog_data);
1890 if (ice->state.streamout != shader->streamout) {
1891 ice->state.streamout = shader->streamout;
1892 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1893 }
1894
1895 if (ice->state.streamout_active) {
1896 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1897 struct iris_stream_output_target *so =
1898 (void *) ice->state.so_target[i];
1899 if (so)
1900 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1901 }
1902 }
1903
1904 if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
1905 iris_update_compiled_fs(ice);
1906
1907 /* Changing shader interfaces may require a URB reconfiguration. */
1908 if (!(dirty & IRIS_DIRTY_URB)) {
1909 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1910 struct brw_vue_prog_data *old = old_prog_datas[i];
1911 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1912 if (!!old != !!new ||
1913 (new && new->urb_entry_size != old->urb_entry_size)) {
1914 ice->state.dirty |= IRIS_DIRTY_URB;
1915 break;
1916 }
1917 }
1918 }
1919
1920 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1921 if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
1922 iris_update_pull_constant_descriptors(ice, i);
1923 }
1924 }
1925
1926 static struct iris_compiled_shader *
1927 iris_compile_cs(struct iris_context *ice,
1928 struct iris_uncompiled_shader *ish,
1929 const struct iris_cs_prog_key *key)
1930 {
1931 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1932 const struct brw_compiler *compiler = screen->compiler;
1933 void *mem_ctx = ralloc_context(NULL);
1934 struct brw_cs_prog_data *cs_prog_data =
1935 rzalloc(mem_ctx, struct brw_cs_prog_data);
1936 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1937 enum brw_param_builtin *system_values;
1938 const struct gen_device_info *devinfo = &screen->devinfo;
1939 unsigned num_system_values;
1940 unsigned num_cbufs;
1941
1942 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1943
1944 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1945 &num_system_values, &num_cbufs);
1946
1947 struct iris_binding_table bt;
1948 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1949 num_system_values, num_cbufs);
1950
1951 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1952
1953 char *error_str = NULL;
1954 const unsigned *program =
1955 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1956 nir, -1, NULL, &error_str);
1957 if (program == NULL) {
1958 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1959 ralloc_free(mem_ctx);
1960 return false;
1961 }
1962
1963 if (ish->compiled_once) {
1964 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1965 } else {
1966 ish->compiled_once = true;
1967 }
1968
1969 struct iris_compiled_shader *shader =
1970 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1971 prog_data, NULL, system_values, num_system_values,
1972 num_cbufs, &bt);
1973
1974 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1975
1976 ralloc_free(mem_ctx);
1977 return shader;
1978 }
1979
1980 static void
1981 iris_update_compiled_cs(struct iris_context *ice)
1982 {
1983 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
1984 struct iris_uncompiled_shader *ish =
1985 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
1986
1987 struct iris_cs_prog_key key = { KEY_ID(base) };
1988 ice->vtbl.populate_cs_key(ice, &key);
1989
1990 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
1991 struct iris_compiled_shader *shader =
1992 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
1993
1994 if (!shader)
1995 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1996
1997 if (!shader)
1998 shader = iris_compile_cs(ice, ish, &key);
1999
2000 if (old != shader) {
2001 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2002 ice->state.dirty |= IRIS_DIRTY_CS |
2003 IRIS_DIRTY_BINDINGS_CS |
2004 IRIS_DIRTY_CONSTANTS_CS;
2005 shs->sysvals_need_upload = true;
2006 }
2007 }
2008
2009 void
2010 iris_update_compiled_compute_shader(struct iris_context *ice)
2011 {
2012 if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
2013 iris_update_compiled_cs(ice);
2014
2015 if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
2016 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2017 }
2018
2019 void
2020 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2021 uint32_t *dst)
2022 {
2023 assert(cs_prog_data->push.total.size > 0);
2024 assert(cs_prog_data->push.cross_thread.size == 0);
2025 assert(cs_prog_data->push.per_thread.dwords == 1);
2026 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
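/* Per-thread push constant data is laid out one 32-byte register (8 dwords)
 * per thread; the only per-thread value here is the subgroup ID (see the
 * asserts above), so write thread index t into the first dword of thread
 * t's register.
 */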
2027 for (unsigned t = 0; t < cs_prog_data->threads; t++)
2028 dst[8 * t] = t;
2029 }
2030
2031 /**
2032 * Allocate scratch BOs as needed for the given per-thread size and stage.
2033 */
2034 struct iris_bo *
2035 iris_get_scratch_space(struct iris_context *ice,
2036 unsigned per_thread_scratch,
2037 gl_shader_stage stage)
2038 {
2039 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2040 struct iris_bufmgr *bufmgr = screen->bufmgr;
2041 const struct gen_device_info *devinfo = &screen->devinfo;
2042
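/* Per-thread scratch sizes are powers of two with a 1KB minimum, so
 * ffs(size) - 11 gives a compact zero-based index (1KB -> 0, 2KB -> 1, ...)
 * into the per-stage scratch BO table.
 */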
2043 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2044 assert(encoded_size < (1 << 16));
2045
2046 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2047
2048 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2049 *
2050 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2051 * must allocate scratch space enough so that each slice has 4
2052 * slices allowed."
2053 *
2054 * According to the other driver team, this applies to compute shaders
2055 * as well. This is not currently documented at all.
2056 *
2057 * This hack is no longer necessary on Gen11+.
2058 */
2059 unsigned subslice_total = screen->subslice_total;
2060 if (devinfo->gen < 11)
2061 subslice_total = 4 * devinfo->num_slices;
2062 assert(subslice_total >= screen->subslice_total);
2063
2064 if (!*bop) {
2065 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2066
2067 if (devinfo->gen >= 11) {
2068 /* The MEDIA_VFE_STATE docs say:
2069 *
2070 * "Starting with this configuration, the Maximum Number of
2071 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2072 *
2073 * Although there are only 7 threads per EU in the configuration,
2074 * the FFTID is calculated as if there are 8 threads per EU,
2075 * which in turn requires a larger amount of Scratch Space to be
2076 * allocated by the driver."
2077 */
2078 scratch_ids_per_subslice = 8 * 8;
2079 }
2080
2081 uint32_t max_threads[] = {
2082 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2083 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2084 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2085 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2086 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2087 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2088 };
2089
2090 uint32_t size = per_thread_scratch * max_threads[stage];
2091
2092 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2093 }
2094
2095 return *bop;
2096 }
2097
2098 /* ------------------------------------------------------------------- */
2099
2100 /**
2101 * The pipe->create_[stage]_state() driver hooks.
2102 *
2103 * Performs basic NIR preprocessing, records any state dependencies, and
2104 * returns an iris_uncompiled_shader as the Gallium CSO.
2105 *
2106 * Actual shader compilation to assembly happens later, at first use.
2107 */
2108 static void *
2109 iris_create_uncompiled_shader(struct pipe_context *ctx,
2110 nir_shader *nir,
2111 const struct pipe_stream_output_info *so_info)
2112 {
2113 struct iris_context *ice = (void *)ctx;
2114 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2115 const struct gen_device_info *devinfo = &screen->devinfo;
2116
2117 struct iris_uncompiled_shader *ish =
2118 calloc(1, sizeof(struct iris_uncompiled_shader));
2119 if (!ish)
2120 return NULL;
2121
2122 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2123
2124 brw_preprocess_nir(screen->compiler, nir, NULL);
2125
2126 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
2127 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2128
2129 nir_sweep(nir);
2130
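/* If the shader has constant data (e.g. large initialized arrays), upload
 * it once now and create a surface state for it, so shader variants can
 * simply bind the uploaded buffer later.
 */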
2131 if (nir->constant_data_size > 0) {
2132 unsigned data_offset;
2133 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2134 32, nir->constant_data, &data_offset, &ish->const_data);
2135
2136 struct pipe_shader_buffer psb = {
2137 .buffer = ish->const_data,
2138 .buffer_offset = data_offset,
2139 .buffer_size = nir->constant_data_size,
2140 };
2141 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2142 }
2143
2144 ish->program_id = get_new_program_id(screen);
2145 ish->nir = nir;
2146 if (so_info) {
2147 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2148 update_so_info(&ish->stream_output, nir->info.outputs_written);
2149 }
2150
2151 /* Save this now before potentially dropping nir->info.name */
2152 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2153 ish->use_alt_mode = true;
2154
2155 if (screen->disk_cache) {
2156 /* Serialize the NIR to a binary blob that we can hash for the disk
2157 * cache. Drop unnecessary information (like variable names)
2158 * so the serialized NIR is smaller, and also to let us detect more
2159 * isomorphic shaders when hashing, increasing cache hits.
2160 */
2161 struct blob blob;
2162 blob_init(&blob);
2163 nir_serialize(&blob, nir, true);
2164 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2165 blob_finish(&blob);
2166 }
2167
2168 return ish;
2169 }
2170
2171 static struct iris_uncompiled_shader *
2172 iris_create_shader_state(struct pipe_context *ctx,
2173 const struct pipe_shader_state *state)
2174 {
2175 struct nir_shader *nir;
2176
2177 if (state->type == PIPE_SHADER_IR_TGSI)
2178 nir = tgsi_to_nir(state->tokens, ctx->screen);
2179 else
2180 nir = state->ir.nir;
2181
2182 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2183 }
2184
2185 static void *
2186 iris_create_vs_state(struct pipe_context *ctx,
2187 const struct pipe_shader_state *state)
2188 {
2189 struct iris_context *ice = (void *) ctx;
2190 struct iris_screen *screen = (void *) ctx->screen;
2191 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2192
2193 /* User clip planes */
2194 if (ish->nir->info.clip_distance_array_size == 0)
2195 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2196
2197 if (screen->precompile) {
2198 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2199
2200 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2201 iris_compile_vs(ice, ish, &key);
2202 }
2203
2204 return ish;
2205 }
2206
2207 static void *
2208 iris_create_tcs_state(struct pipe_context *ctx,
2209 const struct pipe_shader_state *state)
2210 {
2211 struct iris_context *ice = (void *) ctx;
2212 struct iris_screen *screen = (void *) ctx->screen;
2213 const struct brw_compiler *compiler = screen->compiler;
2214 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2215 struct shader_info *info = &ish->nir->info;
2216
2217 if (screen->precompile) {
2218 const unsigned _GL_TRIANGLES = 0x0004;
2219 struct iris_tcs_prog_key key = {
2220 KEY_ID(vue.base),
2221 // XXX: make sure the linker fills this out from the TES...
2222 .tes_primitive_mode =
2223 info->tess.primitive_mode ? info->tess.primitive_mode
2224 : _GL_TRIANGLES,
2225 .outputs_written = info->outputs_written,
2226 .patch_outputs_written = info->patch_outputs_written,
2227 };
2228
2229 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2230 * We don't have that information, so we randomly guess that the input
2231 * and output patches are the same size. This is a bad guess, but we
2232 * can't do much better.
2233 */
2234 if (compiler->use_tcs_8_patch)
2235 key.input_vertices = info->tess.tcs_vertices_out;
2236
2237 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2238 iris_compile_tcs(ice, ish, &key);
2239 }
2240
2241 return ish;
2242 }
2243
2244 static void *
2245 iris_create_tes_state(struct pipe_context *ctx,
2246 const struct pipe_shader_state *state)
2247 {
2248 struct iris_context *ice = (void *) ctx;
2249 struct iris_screen *screen = (void *) ctx->screen;
2250 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2251 struct shader_info *info = &ish->nir->info;
2252
2253 /* User clip planes */
2254 if (ish->nir->info.clip_distance_array_size == 0)
2255 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2256
2257 if (screen->precompile) {
2258 struct iris_tes_prog_key key = {
2259 KEY_ID(vue.base),
2260 // XXX: not ideal, need TCS output/TES input unification
2261 .inputs_read = info->inputs_read,
2262 .patch_inputs_read = info->patch_inputs_read,
2263 };
2264
2265 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2266 iris_compile_tes(ice, ish, &key);
2267 }
2268
2269 return ish;
2270 }
2271
2272 static void *
2273 iris_create_gs_state(struct pipe_context *ctx,
2274 const struct pipe_shader_state *state)
2275 {
2276 struct iris_context *ice = (void *) ctx;
2277 struct iris_screen *screen = (void *) ctx->screen;
2278 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2279
2280 /* User clip planes */
2281 if (ish->nir->info.clip_distance_array_size == 0)
2282 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2283
2284 if (screen->precompile) {
2285 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2286
2287 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2288 iris_compile_gs(ice, ish, &key);
2289 }
2290
2291 return ish;
2292 }
2293
2294 static void *
2295 iris_create_fs_state(struct pipe_context *ctx,
2296 const struct pipe_shader_state *state)
2297 {
2298 struct iris_context *ice = (void *) ctx;
2299 struct iris_screen *screen = (void *) ctx->screen;
2300 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2301 struct shader_info *info = &ish->nir->info;
2302
2303 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2304 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2305 (1ull << IRIS_NOS_RASTERIZER) |
2306 (1ull << IRIS_NOS_BLEND);
2307
2308 /* The program key needs the VUE map if there are > 16 inputs */
2309 if (util_bitcount64(ish->nir->info.inputs_read &
2310 BRW_FS_VARYING_INPUT_MASK) > 16) {
2311 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2312 }
2313
2314 if (screen->precompile) {
2315 const uint64_t color_outputs = info->outputs_written &
2316 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2317 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2318 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2319
2320 bool can_rearrange_varyings =
2321 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2322
2323 const struct gen_device_info *devinfo = &screen->devinfo;
2324 struct iris_fs_prog_key key = {
2325 KEY_ID(base),
2326 .nr_color_regions = util_bitcount(color_outputs),
2327 .coherent_fb_fetch = devinfo->gen >= 9,
2328 .input_slots_valid =
2329 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2330 };
2331
2332 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2333 iris_compile_fs(ice, ish, &key, NULL);
2334 }
2335
2336 return ish;
2337 }
2338
2339 static void *
2340 iris_create_compute_state(struct pipe_context *ctx,
2341 const struct pipe_compute_state *state)
2342 {
2343 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2344
2345 struct iris_context *ice = (void *) ctx;
2346 struct iris_screen *screen = (void *) ctx->screen;
2347 struct iris_uncompiled_shader *ish =
2348 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2349
2350 // XXX: disallow more than 64KB of shared variables
2351
2352 if (screen->precompile) {
2353 struct iris_cs_prog_key key = { KEY_ID(base) };
2354
2355 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2356 iris_compile_cs(ice, ish, &key);
2357 }
2358
2359 return ish;
2360 }
2361
2362 /**
2363 * The pipe->delete_[stage]_state() driver hooks.
2364 *
2365 * Frees the iris_uncompiled_shader.
2366 */
2367 static void
2368 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2369 {
2370 struct iris_uncompiled_shader *ish = state;
2371 struct iris_context *ice = (void *) ctx;
2372
2373 if (ice->shaders.uncompiled[stage] == ish) {
2374 ice->shaders.uncompiled[stage] = NULL;
2375 ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
2376 }
2377
2378 if (ish->const_data) {
2379 pipe_resource_reference(&ish->const_data, NULL);
2380 pipe_resource_reference(&ish->const_data_state.res, NULL);
2381 }
2382
2383 ralloc_free(ish->nir);
2384 free(ish);
2385 }
2386
2387 static void
2388 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2389 {
2390 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2391 }
2392
2393 static void
2394 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2395 {
2396 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2397 }
2398
2399 static void
2400 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2401 {
2402 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2403 }
2404
2405 static void
2406 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2407 {
2408 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2409 }
2410
2411 static void
2412 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2413 {
2414 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2415 }
2416
2417 static void
2418 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2419 {
2420 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2421 }
2422
2423 /**
2424 * The pipe->bind_[stage]_state() driver hook.
2425 *
2426 * Binds an uncompiled shader as the current one for a particular stage.
2427 * Updates dirty tracking to account for the shader's NOS.
2428 */
2429 static void
2430 bind_shader_state(struct iris_context *ice,
2431 struct iris_uncompiled_shader *ish,
2432 gl_shader_stage stage)
2433 {
2434 uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
2435 const uint64_t nos = ish ? ish->nos : 0;
2436
2437 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2438 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2439
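/* If the number of texture slots in use changes, the sampler state tables
 * for this stage need to be re-emitted.
 */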
2440 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2441 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2442 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
2443 }
2444
2445 ice->shaders.uncompiled[stage] = ish;
2446 ice->state.dirty |= dirty_bit;
2447
2448 /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
2449 * (or that they no longer need to do so).
2450 */
2451 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2452 if (nos & (1 << i))
2453 ice->state.dirty_for_nos[i] |= dirty_bit;
2454 else
2455 ice->state.dirty_for_nos[i] &= ~dirty_bit;
2456 }
2457 }
2458
2459 static void
2460 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2461 {
2462 struct iris_context *ice = (struct iris_context *)ctx;
2463 struct iris_uncompiled_shader *new_ish = state;
2464
2465 if (new_ish &&
2466 ice->state.window_space_position !=
2467 new_ish->nir->info.vs.window_space_position) {
2468 ice->state.window_space_position =
2469 new_ish->nir->info.vs.window_space_position;
2470
2471 ice->state.dirty |= IRIS_DIRTY_CLIP |
2472 IRIS_DIRTY_RASTER |
2473 IRIS_DIRTY_CC_VIEWPORT;
2474 }
2475
2476 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2477 }
2478
2479 static void
2480 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2481 {
2482 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2483 }
2484
2485 static void
2486 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2487 {
2488 struct iris_context *ice = (struct iris_context *)ctx;
2489
2490 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2491 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2492 ice->state.dirty |= IRIS_DIRTY_URB;
2493
2494 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2495 }
2496
2497 static void
2498 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2499 {
2500 struct iris_context *ice = (struct iris_context *)ctx;
2501
2502 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2503 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2504 ice->state.dirty |= IRIS_DIRTY_URB;
2505
2506 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2507 }
2508
2509 static void
2510 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2511 {
2512 struct iris_context *ice = (struct iris_context *) ctx;
2513 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2514 const struct gen_device_info *devinfo = &screen->devinfo;
2515 struct iris_uncompiled_shader *old_ish =
2516 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2517 struct iris_uncompiled_shader *new_ish = state;
2518
2519 const unsigned color_bits =
2520 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2521 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2522
2523 /* Fragment shader outputs influence HasWriteableRT */
2524 if (!old_ish || !new_ish ||
2525 (old_ish->nir->info.outputs_written & color_bits) !=
2526 (new_ish->nir->info.outputs_written & color_bits))
2527 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2528
2529 if (devinfo->gen == 8)
2530 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2531
2532 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2533 }
2534
2535 static void
2536 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2537 {
2538 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2539 }
2540
2541 void
2542 iris_init_program_functions(struct pipe_context *ctx)
2543 {
2544 ctx->create_vs_state = iris_create_vs_state;
2545 ctx->create_tcs_state = iris_create_tcs_state;
2546 ctx->create_tes_state = iris_create_tes_state;
2547 ctx->create_gs_state = iris_create_gs_state;
2548 ctx->create_fs_state = iris_create_fs_state;
2549 ctx->create_compute_state = iris_create_compute_state;
2550
2551 ctx->delete_vs_state = iris_delete_vs_state;
2552 ctx->delete_tcs_state = iris_delete_tcs_state;
2553 ctx->delete_tes_state = iris_delete_tes_state;
2554 ctx->delete_gs_state = iris_delete_gs_state;
2555 ctx->delete_fs_state = iris_delete_fs_state;
2556 ctx->delete_compute_state = iris_delete_cs_state;
2557
2558 ctx->bind_vs_state = iris_bind_vs_state;
2559 ctx->bind_tcs_state = iris_bind_tcs_state;
2560 ctx->bind_tes_state = iris_bind_tes_state;
2561 ctx->bind_gs_state = iris_bind_gs_state;
2562 ctx->bind_fs_state = iris_bind_fs_state;
2563 ctx->bind_compute_state = iris_bind_cs_state;
2564 }