iris: Properly move edgeflag_out from output list to global list
[mesa.git] / src / gallium / drivers / iris / iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_INIT_NO_ID(gen) \
50 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
51 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
52 .base.tex.compressed_multisample_layout_mask = ~0, \
53 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
54 #define KEY_INIT(gen) .base.program_string_id = ish->program_id, KEY_INIT_NO_ID(gen)
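
/* These macros expand to designated initializers; a typical use, as seen in
 * the iris_update_compiled_*() functions below, is:
 *
 *    struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
 *
 * which seeds the default texture swizzles, subgroup size, and program
 * string ID before the populate_*_key() vtable hooks fill in the rest.
 */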
55
56 static unsigned
57 get_new_program_id(struct iris_screen *screen)
58 {
59 return p_atomic_inc_return(&screen->program_id);
60 }
61
62 static void *
63 upload_state(struct u_upload_mgr *uploader,
64 struct iris_state_ref *ref,
65 unsigned size,
66 unsigned alignment)
67 {
68 void *p = NULL;
69 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
70 return p;
71 }
72
73 void
74 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
75 struct pipe_shader_buffer *buf,
76 struct iris_state_ref *surf_state,
77 bool ssbo)
78 {
79 struct pipe_context *ctx = &ice->ctx;
80 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
81
82 void *map =
83 upload_state(ice->state.surface_uploader, surf_state,
84 screen->isl_dev.ss.size, 64);
85 if (!unlikely(map)) {
86 surf_state->res = NULL;
87 return;
88 }
89
90 struct iris_resource *res = (void *) buf->buffer;
91 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
92 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
93
94 isl_buffer_fill_state(&screen->isl_dev, map,
95 .address = res->bo->gtt_offset + res->offset +
96 buf->buffer_offset,
97 .size_B = buf->buffer_size - res->offset,
98 .format = ssbo ? ISL_FORMAT_RAW
99 : ISL_FORMAT_R32G32B32A32_FLOAT,
100 .swizzle = ISL_SWIZZLE_IDENTITY,
101 .stride_B = 1,
102 .mocs = ice->vtbl.mocs(res->bo, &screen->isl_dev));
103 }
104
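/**
 * Walk an array-of-arrays deref chain and compute a flattened element offset.
 *
 * For example (illustrative): for a variable declared as img[3][4], accessing
 * img[2][3] with elem_size 1 yields 2 * 4 + 3 * 1 = 11, which is then clamped
 * to the array bounds below.
 */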
105 static nir_ssa_def *
106 get_aoa_deref_offset(nir_builder *b,
107 nir_deref_instr *deref,
108 unsigned elem_size)
109 {
110 unsigned array_size = elem_size;
111 nir_ssa_def *offset = nir_imm_int(b, 0);
112
113 while (deref->deref_type != nir_deref_type_var) {
114 assert(deref->deref_type == nir_deref_type_array);
115
116 /* This level's element size is the previous level's array size */
117 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
118 assert(deref->arr.index.ssa);
119 offset = nir_iadd(b, offset,
120 nir_imul(b, index, nir_imm_int(b, array_size)));
121
122 deref = nir_deref_instr_parent(deref);
123 assert(glsl_type_is_array(deref->type));
124 array_size *= glsl_get_length(deref->type);
125 }
126
127 /* Accessing an invalid surface index with the dataport can result in a
128 * hang. According to the spec "if the index used to select an individual
129 * element is negative or greater than or equal to the size of the array,
130 * the results of the operation are undefined but may not lead to
131 * termination" -- which is one of the possible outcomes of the hang.
132 * Clamp the index to prevent access outside of the array bounds.
133 */
134 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
135 }
136
137 static void
138 iris_lower_storage_image_derefs(nir_shader *nir)
139 {
140 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
141
142 nir_builder b;
143 nir_builder_init(&b, impl);
144
145 nir_foreach_block(block, impl) {
146 nir_foreach_instr_safe(instr, block) {
147 if (instr->type != nir_instr_type_intrinsic)
148 continue;
149
150 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
151 switch (intrin->intrinsic) {
152 case nir_intrinsic_image_deref_load:
153 case nir_intrinsic_image_deref_store:
154 case nir_intrinsic_image_deref_atomic_add:
155 case nir_intrinsic_image_deref_atomic_imin:
156 case nir_intrinsic_image_deref_atomic_umin:
157 case nir_intrinsic_image_deref_atomic_imax:
158 case nir_intrinsic_image_deref_atomic_umax:
159 case nir_intrinsic_image_deref_atomic_and:
160 case nir_intrinsic_image_deref_atomic_or:
161 case nir_intrinsic_image_deref_atomic_xor:
162 case nir_intrinsic_image_deref_atomic_exchange:
163 case nir_intrinsic_image_deref_atomic_comp_swap:
164 case nir_intrinsic_image_deref_size:
165 case nir_intrinsic_image_deref_samples:
166 case nir_intrinsic_image_deref_load_raw_intel:
167 case nir_intrinsic_image_deref_store_raw_intel: {
168 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
169 nir_variable *var = nir_deref_instr_get_variable(deref);
170
171 b.cursor = nir_before_instr(&intrin->instr);
172 nir_ssa_def *index =
173 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
174 get_aoa_deref_offset(&b, deref, 1));
175 nir_rewrite_image_intrinsic(intrin, index, false);
176 break;
177 }
178
179 default:
180 break;
181 }
182 }
183 }
184 }
185
186 // XXX: need unify_interfaces() at link time...
187
188 /**
189 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
190 */
191 static bool
192 iris_fix_edge_flags(nir_shader *nir)
193 {
194 if (nir->info.stage != MESA_SHADER_VERTEX)
195 return false;
196
197 nir_variable *var = NULL;
198 nir_foreach_variable(v, &nir->outputs) {
199 if (v->data.location == VARYING_SLOT_EDGE) {
200 var = v;
201 break;
202 }
203 }
204
205 if (!var)
206 return false;
207
208 exec_node_remove(&var->node);
209 var->data.mode = nir_var_shader_temp;
210 exec_list_push_tail(&nir->globals, &var->node);
211 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
212 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
213 nir_fixup_deref_modes(nir);
214
215 return true;
216 }
217
218 /**
219 * Fix an uncompiled shader's stream output info.
220 *
221 * Core Gallium stores output->register_index as a "slot" number, where
222 * slots are assigned consecutively to all outputs in info->outputs_written.
223 * This naive packing of outputs doesn't work for us - we too have slots,
224 * but the layout is defined by the VUE map, which we won't have until we
225 * compile a specific shader variant. So, we remap these and simply store
226 * VARYING_SLOT_* in our copy's output->register_index fields.
227 *
228 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
229 * components of our VUE header. See brw_vue_map.c for the layout.
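*
* For example (illustrative): if outputs_written contains only
* VARYING_SLOT_POS and VARYING_SLOT_VAR0, then Gallium slot 0 maps back to
* VARYING_SLOT_POS and slot 1 maps back to VARYING_SLOT_VAR0.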
230 */
231 static void
232 update_so_info(struct pipe_stream_output_info *so_info,
233 uint64_t outputs_written)
234 {
235 uint8_t reverse_map[64] = {};
236 unsigned slot = 0;
237 while (outputs_written) {
238 reverse_map[slot++] = u_bit_scan64(&outputs_written);
239 }
240
241 for (unsigned i = 0; i < so_info->num_outputs; i++) {
242 struct pipe_stream_output *output = &so_info->output[i];
243
244 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
245 output->register_index = reverse_map[output->register_index];
246
247 /* The VUE header contains three scalar fields packed together:
248 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
249 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
250 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
251 */
252 switch (output->register_index) {
253 case VARYING_SLOT_LAYER:
254 assert(output->num_components == 1);
255 output->register_index = VARYING_SLOT_PSIZ;
256 output->start_component = 1;
257 break;
258 case VARYING_SLOT_VIEWPORT:
259 assert(output->num_components == 1);
260 output->register_index = VARYING_SLOT_PSIZ;
261 output->start_component = 2;
262 break;
263 case VARYING_SLOT_PSIZ:
264 assert(output->num_components == 1);
265 output->start_component = 3;
266 break;
267 }
268
269 //info->outputs_written |= 1ull << output->register_index;
270 }
271 }
272
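/* Fill one vec4's worth of image param system values: the first n components
 * reference successive dwords of the given brw_image_param field, and the
 * remaining components are padded with BRW_PARAM_BUILTIN_ZERO.
 */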
273 static void
274 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
275 unsigned offset, unsigned n)
276 {
277 assert(offset % sizeof(uint32_t) == 0);
278
279 for (unsigned i = 0; i < n; ++i)
280 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
281
282 for (unsigned i = n; i < 4; ++i)
283 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
284 }
285
286 /**
287 * Associate NIR uniform variables with the prog_data->param[] mechanism
288 * used by the backend. Also, decide which UBOs we'd like to push in an
289 * ideal situation (though the backend can reduce this).
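*
* The resulting constant buffer layout is roughly: cbuf 0 holds gallium's
* uniforms, the user-facing UBOs follow, then (if needed) an extra cbuf of
* system values, with NIR constant data appended as the final cbuf.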
290 */
291 static void
292 iris_setup_uniforms(const struct brw_compiler *compiler,
293 void *mem_ctx,
294 nir_shader *nir,
295 struct brw_stage_prog_data *prog_data,
296 enum brw_param_builtin **out_system_values,
297 unsigned *out_num_system_values,
298 unsigned *out_num_cbufs)
299 {
300 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
301
302 /* The intel compiler assumes that num_uniforms is in bytes. For
303 * scalar that means 4 bytes per uniform slot.
304 *
305 * Ref: brw_nir_lower_uniforms, type_size_scalar_bytes.
306 */
307 nir->num_uniforms *= 4;
308
309 const unsigned IRIS_MAX_SYSTEM_VALUES =
310 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
311 enum brw_param_builtin *system_values =
312 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
313 unsigned num_system_values = 0;
314
315 unsigned patch_vert_idx = -1;
316 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
317 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
318 memset(ucp_idx, -1, sizeof(ucp_idx));
319 memset(img_idx, -1, sizeof(img_idx));
320
321 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
322
323 nir_builder b;
324 nir_builder_init(&b, impl);
325
326 b.cursor = nir_before_block(nir_start_block(impl));
327 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
328 nir_ssa_def *temp_const_ubo_name = NULL;
329
330 /* Turn system value intrinsics into uniforms */
331 nir_foreach_block(block, impl) {
332 nir_foreach_instr_safe(instr, block) {
333 if (instr->type != nir_instr_type_intrinsic)
334 continue;
335
336 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
337 nir_ssa_def *offset;
338
339 switch (intrin->intrinsic) {
340 case nir_intrinsic_load_constant: {
341 /* This one is special because it reads from the shader constant
342 * data and not cbuf0 which gallium uploads for us.
343 */
344 b.cursor = nir_before_instr(instr);
345 nir_ssa_def *offset =
346 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
347 nir_intrinsic_base(intrin));
348
349 if (temp_const_ubo_name == NULL)
350 temp_const_ubo_name = nir_imm_int(&b, 0);
351
352 nir_intrinsic_instr *load_ubo =
353 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
354 load_ubo->num_components = intrin->num_components;
355 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
356 load_ubo->src[1] = nir_src_for_ssa(offset);
357 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
358 intrin->dest.ssa.num_components,
359 intrin->dest.ssa.bit_size,
360 intrin->dest.ssa.name);
361 nir_builder_instr_insert(&b, &load_ubo->instr);
362
363 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
364 nir_src_for_ssa(&load_ubo->dest.ssa));
365 nir_instr_remove(&intrin->instr);
366 continue;
367 }
368 case nir_intrinsic_load_user_clip_plane: {
369 unsigned ucp = nir_intrinsic_ucp_id(intrin);
370
371 if (ucp_idx[ucp] == -1) {
372 ucp_idx[ucp] = num_system_values;
373 num_system_values += 4;
374 }
375
376 for (int i = 0; i < 4; i++) {
377 system_values[ucp_idx[ucp] + i] =
378 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
379 }
380
381 b.cursor = nir_before_instr(instr);
382 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
383 break;
384 }
385 case nir_intrinsic_load_patch_vertices_in:
386 if (patch_vert_idx == -1)
387 patch_vert_idx = num_system_values++;
388
389 system_values[patch_vert_idx] =
390 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
391
392 b.cursor = nir_before_instr(instr);
393 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
394 break;
395 case nir_intrinsic_image_deref_load_param_intel: {
396 assert(devinfo->gen < 9);
397 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
398 nir_variable *var = nir_deref_instr_get_variable(deref);
399
400 if (img_idx[var->data.binding] == -1) {
401 /* GL only allows arrays of arrays of images. */
402 assert(glsl_type_is_image(glsl_without_array(var->type)));
403 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
404
405 for (int i = 0; i < num_images; i++) {
406 const unsigned img = var->data.binding + i;
407
408 img_idx[img] = num_system_values;
409 num_system_values += BRW_IMAGE_PARAM_SIZE;
410
411 uint32_t *img_sv = &system_values[img_idx[img]];
412
413 setup_vec4_image_sysval(
414 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
415 offsetof(struct brw_image_param, offset), 2);
416 setup_vec4_image_sysval(
417 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
418 offsetof(struct brw_image_param, size), 3);
419 setup_vec4_image_sysval(
420 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
421 offsetof(struct brw_image_param, stride), 4);
422 setup_vec4_image_sysval(
423 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
424 offsetof(struct brw_image_param, tiling), 3);
425 setup_vec4_image_sysval(
426 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
427 offsetof(struct brw_image_param, swizzling), 2);
428 }
429 }
430
431 b.cursor = nir_before_instr(instr);
432 offset = nir_iadd(&b,
433 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
434 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
435 nir_intrinsic_base(intrin) * 16));
436 break;
437 }
438 default:
439 continue;
440 }
441
442 unsigned comps = nir_intrinsic_dest_components(intrin);
443
444 nir_intrinsic_instr *load =
445 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
446 load->num_components = comps;
447 load->src[0] = nir_src_for_ssa(temp_ubo_name);
448 load->src[1] = nir_src_for_ssa(offset);
449 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
450 nir_builder_instr_insert(&b, &load->instr);
451 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
452 nir_src_for_ssa(&load->dest.ssa));
453 nir_instr_remove(instr);
454 }
455 }
456
457 nir_validate_shader(nir, "before remapping");
458
459 /* Uniforms are stored in constant buffer 0, the
460 * user-facing UBOs start at index one. So if any constant buffer is
461 * needed, constant buffer 0 will be needed too, so account for it.
462 */
463 unsigned num_cbufs = nir->info.num_ubos;
464 if (num_cbufs || nir->num_uniforms)
465 num_cbufs++;
466
467 /* Place the new params in a new cbuf. */
468 if (num_system_values > 0) {
469 unsigned sysval_cbuf_index = num_cbufs;
470 num_cbufs++;
471
472 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
473 num_system_values);
474
475 nir_foreach_block(block, impl) {
476 nir_foreach_instr_safe(instr, block) {
477 if (instr->type != nir_instr_type_intrinsic)
478 continue;
479
480 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
481
482 if (load->intrinsic != nir_intrinsic_load_ubo)
483 continue;
484
485 b.cursor = nir_before_instr(instr);
486
487 assert(load->src[0].is_ssa);
488
489 if (load->src[0].ssa == temp_ubo_name) {
490 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
491 nir_instr_rewrite_src(instr, &load->src[0],
492 nir_src_for_ssa(imm));
493 }
494 }
495 }
496
497 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
498 nir_opt_constant_folding(nir);
499 } else {
500 ralloc_free(system_values);
501 system_values = NULL;
502 }
503
504 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
505 nir_validate_shader(nir, "after remap");
506
507 /* We don't use params[], but fs_visitor::nir_setup_uniforms() asserts
508 * about it for compute shaders, so go ahead and make some fake ones
509 * which the backend will dead code eliminate.
510 */
511 prog_data->nr_params = nir->num_uniforms / 4;
512 prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);
513
514 /* Constant loads (if any) need to go at the end of the constant buffers so
515 * we need to know num_cbufs before we can lower to them.
516 */
517 if (temp_const_ubo_name != NULL) {
518 nir_load_const_instr *const_ubo_index =
519 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
520 assert(const_ubo_index->def.bit_size == 32);
521 const_ubo_index->value[0].u32 = num_cbufs;
522 }
523
524 *out_system_values = system_values;
525 *out_num_system_values = num_system_values;
526 *out_num_cbufs = num_cbufs;
527 }
528
529 static const char *surface_group_names[] = {
530 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
531 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
532 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
533 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
534 [IRIS_SURFACE_GROUP_UBO] = "ubo",
535 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
536 [IRIS_SURFACE_GROUP_IMAGE] = "image",
537 };
538
539 static void
540 iris_print_binding_table(FILE *fp, const char *name,
541 const struct iris_binding_table *bt)
542 {
543 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
544
545 uint32_t total = 0;
546 uint32_t compacted = 0;
547
548 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
549 uint32_t size = bt->sizes[i];
550 total += size;
551 if (size)
552 compacted += util_bitcount64(bt->used_mask[i]);
553 }
554
555 if (total == 0) {
556 fprintf(fp, "Binding table for %s is empty\n\n", name);
557 return;
558 }
559
560 if (total != compacted) {
561 fprintf(fp, "Binding table for %s "
562 "(compacted to %u entries from %u entries)\n",
563 name, compacted, total);
564 } else {
565 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
566 }
567
568 uint32_t entry = 0;
569 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
570 uint64_t mask = bt->used_mask[i];
571 while (mask) {
572 int index = u_bit_scan64(&mask);
573 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
574 }
575 }
576 fprintf(fp, "\n");
577 }
578
579 enum {
580 /* Max elements in a surface group. */
581 SURFACE_GROUP_MAX_ELEMENTS = 64,
582 };
583
584 /**
585 * Map a <group, index> pair to a binding table index.
586 *
587 * For example: <UBO, 5> => binding table index 12
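*
* With compaction, the returned BTI is the group's base offset plus the
* number of *used* surfaces in the group below the given index (a popcount
* over used_mask), so unused entries don't consume binding table slots.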
588 */
589 uint32_t
590 iris_group_index_to_bti(const struct iris_binding_table *bt,
591 enum iris_surface_group group, uint32_t index)
592 {
593 assert(index < bt->sizes[group]);
594 uint64_t mask = bt->used_mask[group];
595 uint64_t bit = 1ull << index;
596 if (bit & mask) {
597 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
598 } else {
599 return IRIS_SURFACE_NOT_USED;
600 }
601 }
602
603 /**
604 * Map a binding table index back to a <group, index> pair.
605 *
606 * For example: binding table index 12 => <UBO, 5>
607 */
608 uint32_t
609 iris_bti_to_group_index(const struct iris_binding_table *bt,
610 enum iris_surface_group group, uint32_t bti)
611 {
612 uint64_t used_mask = bt->used_mask[group];
613 assert(bti >= bt->offsets[group]);
614
615 uint32_t c = bti - bt->offsets[group];
616 while (used_mask) {
617 int i = u_bit_scan64(&used_mask);
618 if (c == 0)
619 return i;
620 c--;
621 }
622
623 return IRIS_SURFACE_NOT_USED;
624 }
625
626 static void
627 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
628 nir_instr *instr, nir_src *src,
629 enum iris_surface_group group)
630 {
631 assert(bt->sizes[group] > 0);
632
633 b->cursor = nir_before_instr(instr);
634 nir_ssa_def *bti;
635 if (nir_src_is_const(*src)) {
636 uint32_t index = nir_src_as_uint(*src);
637 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
638 src->ssa->bit_size);
639 } else {
640 /* Indirect usage means all the surfaces of the group are available,
641 * so we can just add the base.
642 */
643 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
644 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
645 }
646 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
647 }
648
649 static void
650 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
651 enum iris_surface_group group)
652 {
653 assert(bt->sizes[group] > 0);
654
655 if (nir_src_is_const(*src)) {
656 uint64_t index = nir_src_as_uint(*src);
657 assert(index < bt->sizes[group]);
658 bt->used_mask[group] |= 1ull << index;
659 } else {
660 /* There's an indirect usage, so we need all the surfaces. */
661 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
662 }
663 }
664
665 static bool
666 skip_compacting_binding_tables(void)
667 {
668 static int skip = -1;
669 if (skip < 0)
670 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
671 return skip;
672 }
673
674 /**
675 * Set up the binding table indices and apply to the shader.
676 */
677 static void
678 iris_setup_binding_table(const struct gen_device_info *devinfo,
679 struct nir_shader *nir,
680 struct iris_binding_table *bt,
681 unsigned num_render_targets,
682 unsigned num_system_values,
683 unsigned num_cbufs)
684 {
685 const struct shader_info *info = &nir->info;
686
687 memset(bt, 0, sizeof(*bt));
688
689 /* Set the sizes for each surface group. For some groups, we already know
690 * upfront how many will be used, so mark them.
691 */
692 if (info->stage == MESA_SHADER_FRAGMENT) {
693 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
694 /* All render targets used. */
695 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
696 BITFIELD64_MASK(num_render_targets);
697
698 /* Set up the render target read surface group in order to support
699 * non-coherent framebuffer fetch on Gen8.
700 */
701 if (devinfo->gen == 8 && info->outputs_read) {
702 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
703 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
704 BITFIELD64_MASK(num_render_targets);
705 }
706 } else if (info->stage == MESA_SHADER_COMPUTE) {
707 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
708 }
709
710 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
711 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
712
713 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
714
715 /* Allocate an extra slot in the UBO section for NIR constants.
716 * Binding table compaction will remove it if unnecessary.
717 *
718 * We don't include them in iris_compiled_shader::num_cbufs because
719 * they are uploaded separately from shs->constbuf[], but from a shader
720 * point of view, they're another UBO (at the end of the section).
721 */
722 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
723
724 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
725
726 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
727 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
728
729 /* Mark surfaces used for the cases where we don't have the information
730 * available upfront.
731 */
732 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
733 nir_foreach_block (block, impl) {
734 nir_foreach_instr (instr, block) {
735 if (instr->type != nir_instr_type_intrinsic)
736 continue;
737
738 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
739 switch (intrin->intrinsic) {
740 case nir_intrinsic_load_num_work_groups:
741 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
742 break;
743
744 case nir_intrinsic_load_output:
745 if (devinfo->gen == 8) {
746 mark_used_with_src(bt, &intrin->src[0],
747 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
748 }
749 break;
750
751 case nir_intrinsic_image_size:
752 case nir_intrinsic_image_load:
753 case nir_intrinsic_image_store:
754 case nir_intrinsic_image_atomic_add:
755 case nir_intrinsic_image_atomic_imin:
756 case nir_intrinsic_image_atomic_umin:
757 case nir_intrinsic_image_atomic_imax:
758 case nir_intrinsic_image_atomic_umax:
759 case nir_intrinsic_image_atomic_and:
760 case nir_intrinsic_image_atomic_or:
761 case nir_intrinsic_image_atomic_xor:
762 case nir_intrinsic_image_atomic_exchange:
763 case nir_intrinsic_image_atomic_comp_swap:
764 case nir_intrinsic_image_load_raw_intel:
765 case nir_intrinsic_image_store_raw_intel:
766 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
767 break;
768
769 case nir_intrinsic_load_ubo:
770 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
771 break;
772
773 case nir_intrinsic_store_ssbo:
774 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
775 break;
776
777 case nir_intrinsic_get_buffer_size:
778 case nir_intrinsic_ssbo_atomic_add:
779 case nir_intrinsic_ssbo_atomic_imin:
780 case nir_intrinsic_ssbo_atomic_umin:
781 case nir_intrinsic_ssbo_atomic_imax:
782 case nir_intrinsic_ssbo_atomic_umax:
783 case nir_intrinsic_ssbo_atomic_and:
784 case nir_intrinsic_ssbo_atomic_or:
785 case nir_intrinsic_ssbo_atomic_xor:
786 case nir_intrinsic_ssbo_atomic_exchange:
787 case nir_intrinsic_ssbo_atomic_comp_swap:
788 case nir_intrinsic_ssbo_atomic_fmin:
789 case nir_intrinsic_ssbo_atomic_fmax:
790 case nir_intrinsic_ssbo_atomic_fcomp_swap:
791 case nir_intrinsic_load_ssbo:
792 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
793 break;
794
795 default:
796 break;
797 }
798 }
799 }
800
801 /* When disabled, we just mark everything as used. */
802 if (unlikely(skip_compacting_binding_tables())) {
803 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
804 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
805 }
806
807 /* Calculate the offsets and the binding table size based on the used
808 * surfaces. After this point, the functions to go between "group indices"
809 * and binding table indices can be used.
810 */
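/* For example (illustrative): with 2 render targets, 3 textures, and 1 UBO
 * used, those groups get offsets 0, 2, and 5 respectively, and size_bytes
 * ends up as 6 * 4 = 24.
 */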
811 uint32_t next = 0;
812 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
813 if (bt->used_mask[i] != 0) {
814 bt->offsets[i] = next;
815 next += util_bitcount64(bt->used_mask[i]);
816 }
817 }
818 bt->size_bytes = next * 4;
819
820 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
821 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
822 }
823
824 /* Apply the binding table indices. The backend compiler is not expected
825 * to change those, as we haven't set any of the *_start entries in brw
826 * binding_table.
827 */
828 nir_builder b;
829 nir_builder_init(&b, impl);
830
831 nir_foreach_block (block, impl) {
832 nir_foreach_instr (instr, block) {
833 if (instr->type == nir_instr_type_tex) {
834 nir_tex_instr *tex = nir_instr_as_tex(instr);
835 tex->texture_index =
836 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
837 tex->texture_index);
838 continue;
839 }
840
841 if (instr->type != nir_instr_type_intrinsic)
842 continue;
843
844 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
845 switch (intrin->intrinsic) {
846 case nir_intrinsic_image_size:
847 case nir_intrinsic_image_load:
848 case nir_intrinsic_image_store:
849 case nir_intrinsic_image_atomic_add:
850 case nir_intrinsic_image_atomic_imin:
851 case nir_intrinsic_image_atomic_umin:
852 case nir_intrinsic_image_atomic_imax:
853 case nir_intrinsic_image_atomic_umax:
854 case nir_intrinsic_image_atomic_and:
855 case nir_intrinsic_image_atomic_or:
856 case nir_intrinsic_image_atomic_xor:
857 case nir_intrinsic_image_atomic_exchange:
858 case nir_intrinsic_image_atomic_comp_swap:
859 case nir_intrinsic_image_load_raw_intel:
860 case nir_intrinsic_image_store_raw_intel:
861 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
862 IRIS_SURFACE_GROUP_IMAGE);
863 break;
864
865 case nir_intrinsic_load_ubo:
866 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
867 IRIS_SURFACE_GROUP_UBO);
868 break;
869
870 case nir_intrinsic_store_ssbo:
871 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
872 IRIS_SURFACE_GROUP_SSBO);
873 break;
874
875 case nir_intrinsic_load_output:
876 if (devinfo->gen == 8) {
877 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
878 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
879 }
880 break;
881
882 case nir_intrinsic_get_buffer_size:
883 case nir_intrinsic_ssbo_atomic_add:
884 case nir_intrinsic_ssbo_atomic_imin:
885 case nir_intrinsic_ssbo_atomic_umin:
886 case nir_intrinsic_ssbo_atomic_imax:
887 case nir_intrinsic_ssbo_atomic_umax:
888 case nir_intrinsic_ssbo_atomic_and:
889 case nir_intrinsic_ssbo_atomic_or:
890 case nir_intrinsic_ssbo_atomic_xor:
891 case nir_intrinsic_ssbo_atomic_exchange:
892 case nir_intrinsic_ssbo_atomic_comp_swap:
893 case nir_intrinsic_ssbo_atomic_fmin:
894 case nir_intrinsic_ssbo_atomic_fmax:
895 case nir_intrinsic_ssbo_atomic_fcomp_swap:
896 case nir_intrinsic_load_ssbo:
897 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
898 IRIS_SURFACE_GROUP_SSBO);
899 break;
900
901 default:
902 break;
903 }
904 }
905 }
906 }
907
908 static void
909 iris_debug_recompile(struct iris_context *ice,
910 struct shader_info *info,
911 const struct brw_base_prog_key *key)
912 {
913 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
914 const struct brw_compiler *c = screen->compiler;
915
916 if (!info)
917 return;
918
919 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
920 _mesa_shader_stage_to_string(info->stage),
921 info->name ? info->name : "(no identifier)",
922 info->label ? info->label : "");
923
924 const void *old_key =
925 iris_find_previous_compile(ice, info->stage, key->program_string_id);
926
927 brw_debug_key_recompile(c, &ice->dbg, info->stage, old_key, key);
928 }
929
930 /**
931 * Get the shader for the last enabled geometry stage.
932 *
933 * This stage is the one which will feed stream output and the rasterizer.
934 */
935 static gl_shader_stage
936 last_vue_stage(struct iris_context *ice)
937 {
938 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
939 return MESA_SHADER_GEOMETRY;
940
941 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
942 return MESA_SHADER_TESS_EVAL;
943
944 return MESA_SHADER_VERTEX;
945 }
946
947 /**
948 * Compile a vertex shader, and upload the assembly.
949 */
950 static struct iris_compiled_shader *
951 iris_compile_vs(struct iris_context *ice,
952 struct iris_uncompiled_shader *ish,
953 const struct brw_vs_prog_key *key)
954 {
955 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
956 const struct brw_compiler *compiler = screen->compiler;
957 const struct gen_device_info *devinfo = &screen->devinfo;
958 void *mem_ctx = ralloc_context(NULL);
959 struct brw_vs_prog_data *vs_prog_data =
960 rzalloc(mem_ctx, struct brw_vs_prog_data);
961 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
962 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
963 enum brw_param_builtin *system_values;
964 unsigned num_system_values;
965 unsigned num_cbufs;
966
967 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
968
969 if (key->nr_userclip_plane_consts) {
970 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
971 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
972 false, NULL);
973 nir_lower_io_to_temporaries(nir, impl, true, false);
974 nir_lower_global_vars_to_local(nir);
975 nir_lower_vars_to_ssa(nir);
976 nir_shader_gather_info(nir, impl);
977 }
978
979 prog_data->use_alt_mode = ish->use_alt_mode;
980
981 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
982 &num_system_values, &num_cbufs);
983
984 struct iris_binding_table bt;
985 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
986 num_system_values, num_cbufs);
987
988 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
989
990 brw_compute_vue_map(devinfo,
991 &vue_prog_data->vue_map, nir->info.outputs_written,
992 nir->info.separate_shader);
993
994 /* Don't tell the backend about our clip plane constants; we've already
995 * lowered them in NIR and we don't want it doing it again.
996 */
997 struct brw_vs_prog_key key_no_ucp = *key;
998 key_no_ucp.nr_userclip_plane_consts = 0;
999
1000 char *error_str = NULL;
1001 const unsigned *program =
1002 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &key_no_ucp, vs_prog_data,
1003 nir, -1, NULL, &error_str);
1004 if (program == NULL) {
1005 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1006 ralloc_free(mem_ctx);
1007 return false;
1008 }
1009
1010 if (ish->compiled_once) {
1011 iris_debug_recompile(ice, &nir->info, &key->base);
1012 } else {
1013 ish->compiled_once = true;
1014 }
1015
1016 uint32_t *so_decls =
1017 ice->vtbl.create_so_decl_list(&ish->stream_output,
1018 &vue_prog_data->vue_map);
1019
1020 struct iris_compiled_shader *shader =
1021 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1022 prog_data, so_decls, system_values, num_system_values,
1023 num_cbufs, &bt);
1024
1025 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1026
1027 ralloc_free(mem_ctx);
1028 return shader;
1029 }
1030
1031 /**
1032 * Update the current vertex shader variant.
1033 *
1034 * Fill out the key, look in the cache, compile and bind if needed.
1035 */
1036 static void
1037 iris_update_compiled_vs(struct iris_context *ice)
1038 {
1039 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1040 struct iris_uncompiled_shader *ish =
1041 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1042 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1043 const struct gen_device_info *devinfo = &screen->devinfo;
1044
1045 struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
1046 ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1047
1048 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1049 struct iris_compiled_shader *shader =
1050 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1051
1052 if (!shader)
1053 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1054
1055 if (!shader)
1056 shader = iris_compile_vs(ice, ish, &key);
1057
1058 if (old != shader) {
1059 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1060 ice->state.dirty |= IRIS_DIRTY_VS |
1061 IRIS_DIRTY_BINDINGS_VS |
1062 IRIS_DIRTY_CONSTANTS_VS |
1063 IRIS_DIRTY_VF_SGVS;
1064 shs->sysvals_need_upload = true;
1065
1066 const struct brw_vs_prog_data *vs_prog_data =
1067 (void *) shader->prog_data;
1068 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1069 vs_prog_data->uses_baseinstance;
1070 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1071 vs_prog_data->uses_is_indexed_draw;
1072 const bool needs_sgvs_element = uses_draw_params ||
1073 vs_prog_data->uses_instanceid ||
1074 vs_prog_data->uses_vertexid;
1075
1076 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1077 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1078 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1079 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1080 IRIS_DIRTY_VERTEX_ELEMENTS;
1081 }
1082 ice->state.vs_uses_draw_params = uses_draw_params;
1083 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1084 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1085 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1086 }
1087 }
1088
1089 /**
1090 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1091 */
1092 const struct shader_info *
1093 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1094 {
1095 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1096
1097 if (!ish)
1098 return NULL;
1099
1100 const nir_shader *nir = ish->nir;
1101 return &nir->info;
1102 }
1103
1104 /**
1105 * Get the union of TCS output and TES input slots.
1106 *
1107 * TCS and TES need to agree on a common URB entry layout. In particular,
1108 * the data for all patch vertices is stored in a single URB entry (unlike
1109 * GS which has one entry per input vertex). This means that per-vertex
1110 * array indexing needs a stride.
1111 *
1112 * SSO requires locations to match, but doesn't require the number of
1113 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1114 * So, we need to take the extra step of unifying these on the fly.
1115 */
1116 static void
1117 get_unified_tess_slots(const struct iris_context *ice,
1118 uint64_t *per_vertex_slots,
1119 uint32_t *per_patch_slots)
1120 {
1121 const struct shader_info *tcs =
1122 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1123 const struct shader_info *tes =
1124 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1125
1126 *per_vertex_slots = tes->inputs_read;
1127 *per_patch_slots = tes->patch_inputs_read;
1128
1129 if (tcs) {
1130 *per_vertex_slots |= tcs->outputs_written;
1131 *per_patch_slots |= tcs->patch_outputs_written;
1132 }
1133 }
1134
1135 /**
1136 * Compile a tessellation control shader, and upload the assembly.
1137 */
1138 static struct iris_compiled_shader *
1139 iris_compile_tcs(struct iris_context *ice,
1140 struct iris_uncompiled_shader *ish,
1141 const struct brw_tcs_prog_key *key)
1142 {
1143 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1144 const struct brw_compiler *compiler = screen->compiler;
1145 const struct nir_shader_compiler_options *options =
1146 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1147 void *mem_ctx = ralloc_context(NULL);
1148 struct brw_tcs_prog_data *tcs_prog_data =
1149 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1150 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1151 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1152 const struct gen_device_info *devinfo = &screen->devinfo;
1153 enum brw_param_builtin *system_values = NULL;
1154 unsigned num_system_values = 0;
1155 unsigned num_cbufs = 0;
1156
1157 nir_shader *nir;
1158
1159 struct iris_binding_table bt;
1160
1161 if (ish) {
1162 nir = nir_shader_clone(mem_ctx, ish->nir);
1163
1164 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1165 &num_system_values, &num_cbufs);
1166 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1167 num_system_values, num_cbufs);
1168 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1169 } else {
1170 nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);
1171
1172 /* Reserve space for passing the default tess levels as constants. */
1173 num_cbufs = 1;
1174 num_system_values = 8;
1175 system_values =
1176 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1177 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1178 prog_data->nr_params = num_system_values;
1179
1180 if (key->tes_primitive_mode == GL_QUADS) {
1181 for (int i = 0; i < 4; i++)
1182 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1183
1184 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1185 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1186 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1187 for (int i = 0; i < 3; i++)
1188 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1189
1190 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1191 } else {
1192 assert(key->tes_primitive_mode == GL_ISOLINES);
1193 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1194 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1195 }
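
/* The loops above fill the slots back-to-front: e.g. for GL_QUADS, slots
 * 7..4 end up holding OUTER_X..OUTER_W and slots 3..2 hold INNER_X and
 * INNER_Y, with slots 0..1 left unused.
 */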
1196
1197 /* Manually set up the TCS binding table. */
1198 memset(&bt, 0, sizeof(bt));
1199 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1200 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1201 bt.size_bytes = 4;
1202
1203 prog_data->ubo_ranges[0].length = 1;
1204 }
1205
1206 char *error_str = NULL;
1207 const unsigned *program =
1208 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, key, tcs_prog_data, nir,
1209 -1, NULL, &error_str);
1210 if (program == NULL) {
1211 dbg_printf("Failed to compile control shader: %s\n", error_str);
1212 ralloc_free(mem_ctx);
1213 return false;
1214 }
1215
1216 if (ish) {
1217 if (ish->compiled_once) {
1218 iris_debug_recompile(ice, &nir->info, &key->base);
1219 } else {
1220 ish->compiled_once = true;
1221 }
1222 }
1223
1224 struct iris_compiled_shader *shader =
1225 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1226 prog_data, NULL, system_values, num_system_values,
1227 num_cbufs, &bt);
1228
1229 if (ish)
1230 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1231
1232 ralloc_free(mem_ctx);
1233 return shader;
1234 }
1235
1236 /**
1237 * Update the current tessellation control shader variant.
1238 *
1239 * Fill out the key, look in the cache, compile and bind if needed.
1240 */
1241 static void
1242 iris_update_compiled_tcs(struct iris_context *ice)
1243 {
1244 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1245 struct iris_uncompiled_shader *tcs =
1246 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1247 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1248 const struct brw_compiler *compiler = screen->compiler;
1249 const struct gen_device_info *devinfo = &screen->devinfo;
1250
1251 const struct shader_info *tes_info =
1252 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1253 struct brw_tcs_prog_key key = {
1254 KEY_INIT_NO_ID(devinfo->gen),
1255 .base.program_string_id = tcs ? tcs->program_id : 0,
1256 .tes_primitive_mode = tes_info->tess.primitive_mode,
1257 .input_vertices =
1258 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1259 .quads_workaround = devinfo->gen < 9 &&
1260 tes_info->tess.primitive_mode == GL_QUADS &&
1261 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1262 };
1263 get_unified_tess_slots(ice, &key.outputs_written,
1264 &key.patch_outputs_written);
1265 ice->vtbl.populate_tcs_key(ice, &key);
1266
1267 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1268 struct iris_compiled_shader *shader =
1269 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1270
1271 if (tcs && !shader)
1272 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1273
1274 if (!shader)
1275 shader = iris_compile_tcs(ice, tcs, &key);
1276
1277 if (old != shader) {
1278 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1279 ice->state.dirty |= IRIS_DIRTY_TCS |
1280 IRIS_DIRTY_BINDINGS_TCS |
1281 IRIS_DIRTY_CONSTANTS_TCS;
1282 shs->sysvals_need_upload = true;
1283 }
1284 }
1285
1286 /**
1287 * Compile a tessellation evaluation shader, and upload the assembly.
1288 */
1289 static struct iris_compiled_shader *
1290 iris_compile_tes(struct iris_context *ice,
1291 struct iris_uncompiled_shader *ish,
1292 const struct brw_tes_prog_key *key)
1293 {
1294 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1295 const struct brw_compiler *compiler = screen->compiler;
1296 void *mem_ctx = ralloc_context(NULL);
1297 struct brw_tes_prog_data *tes_prog_data =
1298 rzalloc(mem_ctx, struct brw_tes_prog_data);
1299 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1300 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1301 enum brw_param_builtin *system_values;
1302 const struct gen_device_info *devinfo = &screen->devinfo;
1303 unsigned num_system_values;
1304 unsigned num_cbufs;
1305
1306 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1307
1308 if (key->nr_userclip_plane_consts) {
1309 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1310 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
1311 false, NULL);
1312 nir_lower_io_to_temporaries(nir, impl, true, false);
1313 nir_lower_global_vars_to_local(nir);
1314 nir_lower_vars_to_ssa(nir);
1315 nir_shader_gather_info(nir, impl);
1316 }
1317
1318 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1319 &num_system_values, &num_cbufs);
1320
1321 struct iris_binding_table bt;
1322 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1323 num_system_values, num_cbufs);
1324
1325 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1326
1327 struct brw_vue_map input_vue_map;
1328 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1329 key->patch_inputs_read);
1330
1331 char *error_str = NULL;
1332 const unsigned *program =
1333 brw_compile_tes(compiler, &ice->dbg, mem_ctx, key, &input_vue_map,
1334 tes_prog_data, nir, -1, NULL, &error_str);
1335 if (program == NULL) {
1336 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1337 ralloc_free(mem_ctx);
1338 return false;
1339 }
1340
1341 if (ish->compiled_once) {
1342 iris_debug_recompile(ice, &nir->info, &key->base);
1343 } else {
1344 ish->compiled_once = true;
1345 }
1346
1347 uint32_t *so_decls =
1348 ice->vtbl.create_so_decl_list(&ish->stream_output,
1349 &vue_prog_data->vue_map);
1350
1351
1352 struct iris_compiled_shader *shader =
1353 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1354 prog_data, so_decls, system_values, num_system_values,
1355 num_cbufs, &bt);
1356
1357 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1358
1359 ralloc_free(mem_ctx);
1360 return shader;
1361 }
1362
1363 /**
1364 * Update the current tessellation evaluation shader variant.
1365 *
1366 * Fill out the key, look in the cache, compile and bind if needed.
1367 */
1368 static void
1369 iris_update_compiled_tes(struct iris_context *ice)
1370 {
1371 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1372 struct iris_uncompiled_shader *ish =
1373 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1374 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1375 const struct gen_device_info *devinfo = &screen->devinfo;
1376
1377 struct brw_tes_prog_key key = { KEY_INIT(devinfo->gen) };
1378 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1379 ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1380
1381 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1382 struct iris_compiled_shader *shader =
1383 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1384
1385 if (!shader)
1386 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1387
1388 if (!shader)
1389 shader = iris_compile_tes(ice, ish, &key);
1390
1391 if (old != shader) {
1392 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1393 ice->state.dirty |= IRIS_DIRTY_TES |
1394 IRIS_DIRTY_BINDINGS_TES |
1395 IRIS_DIRTY_CONSTANTS_TES;
1396 shs->sysvals_need_upload = true;
1397 }
1398
1399 /* TODO: Could compare and avoid flagging this. */
1400 const struct shader_info *tes_info = &ish->nir->info;
1401 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1402 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
1403 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1404 }
1405 }
1406
1407 /**
1408 * Compile a geometry shader, and upload the assembly.
1409 */
1410 static struct iris_compiled_shader *
1411 iris_compile_gs(struct iris_context *ice,
1412 struct iris_uncompiled_shader *ish,
1413 const struct brw_gs_prog_key *key)
1414 {
1415 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1416 const struct brw_compiler *compiler = screen->compiler;
1417 const struct gen_device_info *devinfo = &screen->devinfo;
1418 void *mem_ctx = ralloc_context(NULL);
1419 struct brw_gs_prog_data *gs_prog_data =
1420 rzalloc(mem_ctx, struct brw_gs_prog_data);
1421 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1422 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1423 enum brw_param_builtin *system_values;
1424 unsigned num_system_values;
1425 unsigned num_cbufs;
1426
1427 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1428
1429 if (key->nr_userclip_plane_consts) {
1430 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1431 nir_lower_clip_gs(nir, (1 << key->nr_userclip_plane_consts) - 1, false,
1432 NULL);
1433 nir_lower_io_to_temporaries(nir, impl, true, false);
1434 nir_lower_global_vars_to_local(nir);
1435 nir_lower_vars_to_ssa(nir);
1436 nir_shader_gather_info(nir, impl);
1437 }
1438
1439 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1440 &num_system_values, &num_cbufs);
1441
1442 struct iris_binding_table bt;
1443 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1444 num_system_values, num_cbufs);
1445
1446 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1447
1448 brw_compute_vue_map(devinfo,
1449 &vue_prog_data->vue_map, nir->info.outputs_written,
1450 nir->info.separate_shader);
1451
1452 char *error_str = NULL;
1453 const unsigned *program =
1454 brw_compile_gs(compiler, &ice->dbg, mem_ctx, key, gs_prog_data, nir,
1455 NULL, -1, NULL, &error_str);
1456 if (program == NULL) {
1457 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1458 ralloc_free(mem_ctx);
1459 return false;
1460 }
1461
1462 if (ish->compiled_once) {
1463 iris_debug_recompile(ice, &nir->info, &key->base);
1464 } else {
1465 ish->compiled_once = true;
1466 }
1467
1468 uint32_t *so_decls =
1469 ice->vtbl.create_so_decl_list(&ish->stream_output,
1470 &vue_prog_data->vue_map);
1471
1472 struct iris_compiled_shader *shader =
1473 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1474 prog_data, so_decls, system_values, num_system_values,
1475 num_cbufs, &bt);
1476
1477 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1478
1479 ralloc_free(mem_ctx);
1480 return shader;
1481 }
1482
1483 /**
1484 * Update the current geometry shader variant.
1485 *
1486 * Fill out the key, look in the cache, compile and bind if needed.
1487 */
1488 static void
1489 iris_update_compiled_gs(struct iris_context *ice)
1490 {
1491 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1492 struct iris_uncompiled_shader *ish =
1493 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1494 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1495 struct iris_compiled_shader *shader = NULL;
1496
1497 if (ish) {
1498 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1499 const struct gen_device_info *devinfo = &screen->devinfo;
1500 struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };
1501 ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1502
1503 shader =
1504 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1505
1506 if (!shader)
1507 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1508
1509 if (!shader)
1510 shader = iris_compile_gs(ice, ish, &key);
1511 }
1512
1513 if (old != shader) {
1514 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1515 ice->state.dirty |= IRIS_DIRTY_GS |
1516 IRIS_DIRTY_BINDINGS_GS |
1517 IRIS_DIRTY_CONSTANTS_GS;
1518 shs->sysvals_need_upload = true;
1519 }
1520 }
1521
1522 /**
1523 * Compile a fragment (pixel) shader, and upload the assembly.
1524 */
1525 static struct iris_compiled_shader *
1526 iris_compile_fs(struct iris_context *ice,
1527 struct iris_uncompiled_shader *ish,
1528 const struct brw_wm_prog_key *key,
1529 struct brw_vue_map *vue_map)
1530 {
1531 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1532 const struct brw_compiler *compiler = screen->compiler;
1533 void *mem_ctx = ralloc_context(NULL);
1534 struct brw_wm_prog_data *fs_prog_data =
1535 rzalloc(mem_ctx, struct brw_wm_prog_data);
1536 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1537 enum brw_param_builtin *system_values;
1538 const struct gen_device_info *devinfo = &screen->devinfo;
1539 unsigned num_system_values;
1540 unsigned num_cbufs;
1541
1542 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1543
1544 prog_data->use_alt_mode = ish->use_alt_mode;
1545
1546 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1547 &num_system_values, &num_cbufs);
1548
1549 /* Lower output variables to load_output intrinsics before setting up
1550 * binding tables, so iris_setup_binding_table can map any load_output
1551 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1552 * non-coherent framebuffer fetches.
1553 */
1554 brw_nir_lower_fs_outputs(nir);
1555
1556 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1557 * and do not need a binding table entry with a null surface. Earlier
1558 * generations need an entry for a null surface.
1559 */
1560 int null_rts = devinfo->gen < 11 ? 1 : 0;
1561
1562 struct iris_binding_table bt;
1563 iris_setup_binding_table(devinfo, nir, &bt,
1564 MAX2(key->nr_color_regions, null_rts),
1565 num_system_values, num_cbufs);
1566
1567 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1568
1569 char *error_str = NULL;
1570 const unsigned *program =
1571 brw_compile_fs(compiler, &ice->dbg, mem_ctx, key, fs_prog_data,
1572 nir, -1, -1, -1, true, false, vue_map,
1573 NULL, &error_str);
1574 if (program == NULL) {
1575 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1576 ralloc_free(mem_ctx);
1577 return false;
1578 }
1579
1580 if (ish->compiled_once) {
1581 iris_debug_recompile(ice, &nir->info, &key->base);
1582 } else {
1583 ish->compiled_once = true;
1584 }
1585
1586 struct iris_compiled_shader *shader =
1587 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1588 prog_data, NULL, system_values, num_system_values,
1589 num_cbufs, &bt);
1590
1591 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1592
1593 ralloc_free(mem_ctx);
1594 return shader;
1595 }
1596
1597 /**
1598 * Update the current fragment shader variant.
1599 *
1600 * Fill out the key, look in the cache, compile and bind if needed.
1601 */
1602 static void
1603 iris_update_compiled_fs(struct iris_context *ice)
1604 {
1605 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1606 const struct gen_device_info *devinfo = &screen->devinfo;
1607 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1608 struct iris_uncompiled_shader *ish =
1609 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1610 struct brw_wm_prog_key key = { KEY_INIT(devinfo->gen) };
1611 ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1612
1613 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1614 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1615
1616 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1617 struct iris_compiled_shader *shader =
1618 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1619
1620 if (!shader)
1621 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1622
1623 if (!shader)
1624 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1625
1626 if (old != shader) {
1627 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1628 // toggles. might be able to avoid flagging SBE too.
1629 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1630 ice->state.dirty |= IRIS_DIRTY_FS |
1631 IRIS_DIRTY_BINDINGS_FS |
1632 IRIS_DIRTY_CONSTANTS_FS |
1633 IRIS_DIRTY_WM |
1634 IRIS_DIRTY_CLIP |
1635 IRIS_DIRTY_SBE;
1636 shs->sysvals_need_upload = true;
1637 }
1638 }
1639
1640 /**
1641 * Update the last enabled stage's VUE map.
1642 *
1643 * When the shader feeding the rasterizer's output interface changes, we
1644 * need to re-emit various packets.
1645 */
1646 static void
1647 update_last_vue_map(struct iris_context *ice,
1648 struct brw_stage_prog_data *prog_data)
1649 {
1650 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1651 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1652 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1653 const uint64_t changed_slots =
1654 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1655
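   /* If the previous stage started or stopped writing gl_ViewportIndex,
    * the number of hardware viewports in use changes, along with the
    * viewport, scissor, and clip state derived from it.
    */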
1656 if (changed_slots & VARYING_BIT_VIEWPORT) {
1657 ice->state.num_viewports =
1658 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1659 ice->state.dirty |= IRIS_DIRTY_CLIP |
1660 IRIS_DIRTY_SF_CL_VIEWPORT |
1661 IRIS_DIRTY_CC_VIEWPORT |
1662 IRIS_DIRTY_SCISSOR_RECT |
1663 IRIS_DIRTY_UNCOMPILED_FS |
1664 ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1665 }
1666
1667 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1668 ice->state.dirty |= IRIS_DIRTY_SBE;
1669 }
1670
1671 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1672 }
1673
1674 static void
1675 iris_update_pull_constant_descriptors(struct iris_context *ice,
1676 gl_shader_stage stage)
1677 {
1678 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1679
1680 if (!shader || !shader->prog_data->has_ubo_pull)
1681 return;
1682
1683 struct iris_shader_state *shs = &ice->state.shaders[stage];
1684 bool any_new_descriptors =
1685 shader->num_system_values > 0 && shs->sysvals_need_upload;
1686
1687 unsigned bound_cbufs = shs->bound_cbufs;
1688
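   /* Lazily create a SURFACE_STATE for any bound constant buffer that
    * doesn't have one yet; pulled constants are read through these
    * surfaces.
    */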
1689 while (bound_cbufs) {
1690 const int i = u_bit_scan(&bound_cbufs);
1691 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1692 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1693 if (!surf_state->res && cbuf->buffer) {
1694 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1695 any_new_descriptors = true;
1696 }
1697 }
1698
1699 if (any_new_descriptors)
1700 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1701 }
1702
1703 /**
1704 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1705 */
1706 static struct brw_vue_prog_data *
1707 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1708 {
1709 if (!ice->shaders.prog[stage])
1710 return NULL;
1711
1712 return (void *) ice->shaders.prog[stage]->prog_data;
1713 }
1714
1715 // XXX: iris_compiled_shaders are space-leaking :(
1716 // XXX: do remember to unbind them if deleting them.
1717
1718 /**
1719 * Update the current shader variants for the given state.
1720 *
1721 * This should be called on every draw call to ensure that the correct
1722 * shaders are bound. It will also flag any dirty state triggered by
1723 * swapping out those shaders.
1724 */
1725 void
1726 iris_update_compiled_shaders(struct iris_context *ice)
1727 {
1728 const uint64_t dirty = ice->state.dirty;
1729
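   /* Snapshot the current VUE prog_data so we can detect below whether any
    * stage's URB entry size changed, which requires reconfiguring the URB.
    */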
1730 struct brw_vue_prog_data *old_prog_datas[4];
1731 if (!(dirty & IRIS_DIRTY_URB)) {
1732 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1733 old_prog_datas[i] = get_vue_prog_data(ice, i);
1734 }
1735
1736 if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
1737 struct iris_uncompiled_shader *tes =
1738 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1739 if (tes) {
1740 iris_update_compiled_tcs(ice);
1741 iris_update_compiled_tes(ice);
1742 } else {
1743 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1744 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1745 ice->state.dirty |=
1746 IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
1747 IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
1748 IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
1749 }
1750 }
1751
1752 if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
1753 iris_update_compiled_vs(ice);
1754 if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
1755 iris_update_compiled_gs(ice);
1756
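   /* Whether the last geometry stage emits points or lines feeds the clip
    * unit's XY test enables, so recheck it whenever the GS or TES changes.
    */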
1757 if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
1758 const struct iris_compiled_shader *gs =
1759 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1760 const struct iris_compiled_shader *tes =
1761 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1762
1763 bool points_or_lines = false;
1764
1765 if (gs) {
1766 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1767 points_or_lines =
1768 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1769 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1770 } else if (tes) {
1771 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1772 points_or_lines =
1773 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1774 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1775 }
1776
1777 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1778 /* Outbound to XY Clip enables */
1779 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1780 ice->state.dirty |= IRIS_DIRTY_CLIP;
1781 }
1782 }
1783
1784 gl_shader_stage last_stage = last_vue_stage(ice);
1785 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1786 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1787 update_last_vue_map(ice, shader->prog_data);
1788 if (ice->state.streamout != shader->streamout) {
1789 ice->state.streamout = shader->streamout;
1790 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1791 }
1792
1793 if (ice->state.streamout_active) {
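      /* pipe_stream_output_info strides are in DWords; the stream output
       * targets want bytes.
       */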
1794 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1795 struct iris_stream_output_target *so =
1796 (void *) ice->state.so_target[i];
1797 if (so)
1798 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1799 }
1800 }
1801
1802 if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
1803 iris_update_compiled_fs(ice);
1804
1805 /* Changing shader interfaces may require a URB reconfiguration. */
1806 if (!(dirty & IRIS_DIRTY_URB)) {
1807 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1808 struct brw_vue_prog_data *old = old_prog_datas[i];
1809 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1810 if (!!old != !!new ||
1811 (new && new->urb_entry_size != old->urb_entry_size)) {
1812 ice->state.dirty |= IRIS_DIRTY_URB;
1813 break;
1814 }
1815 }
1816 }
1817
1818 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1819 if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
1820 iris_update_pull_constant_descriptors(ice, i);
1821 }
1822 }
1823
1824 static struct iris_compiled_shader *
1825 iris_compile_cs(struct iris_context *ice,
1826 struct iris_uncompiled_shader *ish,
1827 const struct brw_cs_prog_key *key)
1828 {
1829 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1830 const struct brw_compiler *compiler = screen->compiler;
1831 void *mem_ctx = ralloc_context(NULL);
1832 struct brw_cs_prog_data *cs_prog_data =
1833 rzalloc(mem_ctx, struct brw_cs_prog_data);
1834 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1835 enum brw_param_builtin *system_values;
1836 const struct gen_device_info *devinfo = &screen->devinfo;
1837 unsigned num_system_values;
1838 unsigned num_cbufs;
1839
1840 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1841
1842 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1843 &num_system_values, &num_cbufs);
1844
1845 struct iris_binding_table bt;
1846 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1847 num_system_values, num_cbufs);
1848
1849 char *error_str = NULL;
1850 const unsigned *program =
1851 brw_compile_cs(compiler, &ice->dbg, mem_ctx, key, cs_prog_data,
1852 nir, -1, NULL, &error_str);
1853 if (program == NULL) {
1854 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1855 ralloc_free(mem_ctx);
1856 return NULL;
1857 }
1858
1859 if (ish->compiled_once) {
1860 iris_debug_recompile(ice, &nir->info, &key->base);
1861 } else {
1862 ish->compiled_once = true;
1863 }
1864
1865 struct iris_compiled_shader *shader =
1866 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1867 prog_data, NULL, system_values, num_system_values,
1868 num_cbufs, &bt);
1869
1870 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1871
1872 ralloc_free(mem_ctx);
1873 return shader;
1874 }
1875
1876 static void
1877 iris_update_compiled_cs(struct iris_context *ice)
1878 {
1879 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
1880 struct iris_uncompiled_shader *ish =
1881 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
1882
1883 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1884 const struct gen_device_info *devinfo = &screen->devinfo;
1885 struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };
1886 ice->vtbl.populate_cs_key(ice, &key);
1887
1888 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
1889 struct iris_compiled_shader *shader =
1890 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
1891
1892 if (!shader)
1893 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1894
1895 if (!shader)
1896 shader = iris_compile_cs(ice, ish, &key);
1897
1898 if (old != shader) {
1899 ice->shaders.prog[IRIS_CACHE_CS] = shader;
1900 ice->state.dirty |= IRIS_DIRTY_CS |
1901 IRIS_DIRTY_BINDINGS_CS |
1902 IRIS_DIRTY_CONSTANTS_CS;
1903 shs->sysvals_need_upload = true;
1904 }
1905 }
1906
1907 void
1908 iris_update_compiled_compute_shader(struct iris_context *ice)
1909 {
1910 if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
1911 iris_update_compiled_cs(ice);
1912
1913 if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
1914 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
1915 }
1916
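/**
 * Fill out the compute shader push constant buffer: one subgroup ID per
 * thread, each in its own 8-DWord (32-byte) slot.
 */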
1917 void
1918 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
1919 uint32_t *dst)
1920 {
1921 assert(cs_prog_data->push.total.size > 0);
1922 assert(cs_prog_data->push.cross_thread.size == 0);
1923 assert(cs_prog_data->push.per_thread.dwords == 1);
1924 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
1925 for (unsigned t = 0; t < cs_prog_data->threads; t++)
1926 dst[8 * t] = t;
1927 }
1928
1929 /**
1930 * Allocate scratch BOs as needed for the given per-thread size and stage.
1931 */
1932 struct iris_bo *
1933 iris_get_scratch_space(struct iris_context *ice,
1934 unsigned per_thread_scratch,
1935 gl_shader_stage stage)
1936 {
1937 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1938 struct iris_bufmgr *bufmgr = screen->bufmgr;
1939 const struct gen_device_info *devinfo = &screen->devinfo;
1940
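   /* per_thread_scratch is a power of two of at least 1KB, so this is
    * effectively log2(size) - 10, used to index the scratch BO cache below.
    */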
1941 unsigned encoded_size = ffs(per_thread_scratch) - 11;
1942 assert(encoded_size < (1 << 16));
1943
1944 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
1945
1946 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
1947 *
1948 * "Scratch Space per slice is computed based on 4 sub-slices. SW
1949 * must allocate scratch space enough so that each slice has 4
1950 * slices allowed."
1951 *
1952 * According to the other driver team, this applies to compute shaders
1953 * as well. This is not currently documented at all.
1954 *
1955 * This hack is no longer necessary on Gen11+.
1956 */
1957 unsigned subslice_total = screen->subslice_total;
1958 if (devinfo->gen < 11)
1959 subslice_total = 4 * devinfo->num_slices;
1960 assert(subslice_total >= screen->subslice_total);
1961
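   /* Scratch BOs are cached per encoded size and stage; only allocate a
    * new one if we don't already have a suitable BO.
    */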
1962 if (!*bop) {
1963 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
1964
1965 if (devinfo->gen >= 11) {
1966 /* The MEDIA_VFE_STATE docs say:
1967 *
1968 * "Starting with this configuration, the Maximum Number of
1969 * Threads must be set to (#EU * 8) for GPGPU dispatches.
1970 *
1971 * Although there are only 7 threads per EU in the configuration,
1972 * the FFTID is calculated as if there are 8 threads per EU,
1973 * which in turn requires a larger amount of Scratch Space to be
1974 * allocated by the driver."
1975 */
1976 scratch_ids_per_subslice = 8 * 8;
1977 }
1978
1979 uint32_t max_threads[] = {
1980 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
1981 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
1982 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
1983 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
1984 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
1985 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
1986 };
1987
1988 uint32_t size = per_thread_scratch * max_threads[stage];
1989
1990 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
1991 }
1992
1993 return *bop;
1994 }
1995
1996 /* ------------------------------------------------------------------- */
1997
1998 /**
1999 * The pipe->create_[stage]_state() driver hooks.
2000 *
2001 * Performs basic NIR preprocessing, records any state dependencies, and
2002 * returns an iris_uncompiled_shader as the Gallium CSO.
2003 *
2004 * Actual shader compilation to assembly happens later, at first use.
2005 */
2006 static void *
2007 iris_create_uncompiled_shader(struct pipe_context *ctx,
2008 nir_shader *nir,
2009 const struct pipe_stream_output_info *so_info)
2010 {
2011 struct iris_context *ice = (void *)ctx;
2012 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2013 const struct gen_device_info *devinfo = &screen->devinfo;
2014
2015 struct iris_uncompiled_shader *ish =
2016 calloc(1, sizeof(struct iris_uncompiled_shader));
2017 if (!ish)
2018 return NULL;
2019
2020 ish->needs_edge_flag = iris_fix_edge_flags(nir);
2021
2022 brw_preprocess_nir(screen->compiler, nir, NULL);
2023
2024 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
2025 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2026
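   /* Reclaim memory for instructions and data orphaned by the passes above. */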
2027 nir_sweep(nir);
2028
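   /* If the shader has large constant data, upload it once now and create a
    * UBO-style surface state for it so it can be bound later.
    */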
2029 if (nir->constant_data_size > 0) {
2030 unsigned data_offset;
2031 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2032 32, nir->constant_data, &data_offset, &ish->const_data);
2033
2034 struct pipe_shader_buffer psb = {
2035 .buffer = ish->const_data,
2036 .buffer_offset = data_offset,
2037 .buffer_size = nir->constant_data_size,
2038 };
2039 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2040 }
2041
2042 ish->program_id = get_new_program_id(screen);
2043 ish->nir = nir;
2044 if (so_info) {
2045 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2046 update_so_info(&ish->stream_output, nir->info.outputs_written);
2047 }
2048
2049 /* Save this now before potentially dropping nir->info.name */
2050 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2051 ish->use_alt_mode = true;
2052
2053 if (screen->disk_cache) {
2054 /* Serialize the NIR to a binary blob that we can hash for the disk
2055 * cache. Drop unnecessary information (like variable names) so the
2056 * serialized NIR is smaller, and so we can detect more isomorphic
2057 * shaders when hashing, increasing cache hits.
2058 */
2059 struct blob blob;
2060 blob_init(&blob);
2061 nir_serialize(&blob, nir, true);
2062 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2063 blob_finish(&blob);
2064 }
2065
2066 return ish;
2067 }
2068
2069 static struct iris_uncompiled_shader *
2070 iris_create_shader_state(struct pipe_context *ctx,
2071 const struct pipe_shader_state *state)
2072 {
2073 struct nir_shader *nir;
2074
2075 if (state->type == PIPE_SHADER_IR_TGSI)
2076 nir = tgsi_to_nir(state->tokens, ctx->screen);
2077 else
2078 nir = state->ir.nir;
2079
2080 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2081 }
2082
2083 static void *
2084 iris_create_vs_state(struct pipe_context *ctx,
2085 const struct pipe_shader_state *state)
2086 {
2087 struct iris_context *ice = (void *) ctx;
2088 struct iris_screen *screen = (void *) ctx->screen;
2089 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2090
2091 /* User clip planes */
2092 if (ish->nir->info.clip_distance_array_size == 0)
2093 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2094
2095 if (screen->precompile) {
2096 const struct gen_device_info *devinfo = &screen->devinfo;
2097 struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
2098
2099 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2100 iris_compile_vs(ice, ish, &key);
2101 }
2102
2103 return ish;
2104 }
2105
2106 static void *
2107 iris_create_tcs_state(struct pipe_context *ctx,
2108 const struct pipe_shader_state *state)
2109 {
2110 struct iris_context *ice = (void *) ctx;
2111 struct iris_screen *screen = (void *) ctx->screen;
2112 const struct brw_compiler *compiler = screen->compiler;
2113 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2114 struct shader_info *info = &ish->nir->info;
2115
2116 if (screen->precompile) {
2117 const unsigned _GL_TRIANGLES = 0x0004;
2118 const struct gen_device_info *devinfo = &screen->devinfo;
2119 struct brw_tcs_prog_key key = {
2120 KEY_INIT(devinfo->gen),
2121 // XXX: make sure the linker fills this out from the TES...
2122 .tes_primitive_mode =
2123 info->tess.primitive_mode ? info->tess.primitive_mode
2124 : _GL_TRIANGLES,
2125 .outputs_written = info->outputs_written,
2126 .patch_outputs_written = info->patch_outputs_written,
2127 };
2128
2129 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2130 * We don't have that information, so we randomly guess that the input
2131 * and output patches are the same size. This is a bad guess, but we
2132 * can't do much better.
2133 */
2134 if (compiler->use_tcs_8_patch)
2135 key.input_vertices = info->tess.tcs_vertices_out;
2136
2137 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2138 iris_compile_tcs(ice, ish, &key);
2139 }
2140
2141 return ish;
2142 }
2143
2144 static void *
2145 iris_create_tes_state(struct pipe_context *ctx,
2146 const struct pipe_shader_state *state)
2147 {
2148 struct iris_context *ice = (void *) ctx;
2149 struct iris_screen *screen = (void *) ctx->screen;
2150 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2151 struct shader_info *info = &ish->nir->info;
2152
2153 /* User clip planes */
2154 if (ish->nir->info.clip_distance_array_size == 0)
2155 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2156
2157 if (screen->precompile) {
2158 const struct gen_device_info *devinfo = &screen->devinfo;
2159 struct brw_tes_prog_key key = {
2160 KEY_INIT(devinfo->gen),
2161 // XXX: not ideal, need TCS output/TES input unification
2162 .inputs_read = info->inputs_read,
2163 .patch_inputs_read = info->patch_inputs_read,
2164 };
2165
2166 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2167 iris_compile_tes(ice, ish, &key);
2168 }
2169
2170 return ish;
2171 }
2172
2173 static void *
2174 iris_create_gs_state(struct pipe_context *ctx,
2175 const struct pipe_shader_state *state)
2176 {
2177 struct iris_context *ice = (void *) ctx;
2178 struct iris_screen *screen = (void *) ctx->screen;
2179 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2180
2181 /* User clip planes */
2182 if (ish->nir->info.clip_distance_array_size == 0)
2183 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2184
2185 if (screen->precompile) {
2186 const struct gen_device_info *devinfo = &screen->devinfo;
2187 struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };
2188
2189 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2190 iris_compile_gs(ice, ish, &key);
2191 }
2192
2193 return ish;
2194 }
2195
2196 static void *
2197 iris_create_fs_state(struct pipe_context *ctx,
2198 const struct pipe_shader_state *state)
2199 {
2200 struct iris_context *ice = (void *) ctx;
2201 struct iris_screen *screen = (void *) ctx->screen;
2202 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2203 struct shader_info *info = &ish->nir->info;
2204
2205 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2206 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2207 (1ull << IRIS_NOS_RASTERIZER) |
2208 (1ull << IRIS_NOS_BLEND);
2209
2210 /* The program key needs the VUE map if there are > 16 inputs */
2211 if (util_bitcount64(ish->nir->info.inputs_read &
2212 BRW_FS_VARYING_INPUT_MASK) > 16) {
2213 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2214 }
2215
2216 if (screen->precompile) {
2217 const uint64_t color_outputs = info->outputs_written &
2218 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2219 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2220 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2221
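      /* With 16 or fewer varying inputs the FS doesn't depend on the VUE
       * map (see the IRIS_NOS_LAST_VUE_MAP check above), so the precompile
       * key can leave input_slots_valid as zero.
       */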
2222 bool can_rearrange_varyings =
2223 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2224
2225 const struct gen_device_info *devinfo = &screen->devinfo;
2226 struct brw_wm_prog_key key = {
2227 KEY_INIT(devinfo->gen),
2228 .nr_color_regions = util_bitcount(color_outputs),
2229 .coherent_fb_fetch = devinfo->gen >= 9,
2230 .input_slots_valid =
2231 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2232 };
2233
2234 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2235 iris_compile_fs(ice, ish, &key, NULL);
2236 }
2237
2238 return ish;
2239 }
2240
2241 static void *
2242 iris_create_compute_state(struct pipe_context *ctx,
2243 const struct pipe_compute_state *state)
2244 {
2245 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2246
2247 struct iris_context *ice = (void *) ctx;
2248 struct iris_screen *screen = (void *) ctx->screen;
2249 struct iris_uncompiled_shader *ish =
2250 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2251
2252 // XXX: disallow more than 64KB of shared variables
2253
2254 if (screen->precompile) {
2255 const struct gen_device_info *devinfo = &screen->devinfo;
2256 struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };
2257
2258 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2259 iris_compile_cs(ice, ish, &key);
2260 }
2261
2262 return ish;
2263 }
2264
2265 /**
2266 * The pipe->delete_[stage]_state() driver hooks.
2267 *
2268 * Frees the iris_uncompiled_shader.
2269 */
2270 static void
2271 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2272 {
2273 struct iris_uncompiled_shader *ish = state;
2274 struct iris_context *ice = (void *) ctx;
2275
2276 if (ice->shaders.uncompiled[stage] == ish) {
2277 ice->shaders.uncompiled[stage] = NULL;
2278 ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
2279 }
2280
2281 if (ish->const_data) {
2282 pipe_resource_reference(&ish->const_data, NULL);
2283 pipe_resource_reference(&ish->const_data_state.res, NULL);
2284 }
2285
2286 ralloc_free(ish->nir);
2287 free(ish);
2288 }
2289
2290 static void
2291 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2292 {
2293 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2294 }
2295
2296 static void
2297 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2298 {
2299 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2300 }
2301
2302 static void
2303 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2304 {
2305 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2306 }
2307
2308 static void
2309 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2310 {
2311 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2312 }
2313
2314 static void
2315 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2316 {
2317 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2318 }
2319
2320 static void
2321 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2322 {
2323 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2324 }
2325
2326 /**
2327 * The pipe->bind_[stage]_state() driver hook.
2328 *
2329 * Binds an uncompiled shader as the current one for a particular stage.
2330 * Updates dirty tracking to account for the shader's NOS.
2331 */
2332 static void
2333 bind_shader_state(struct iris_context *ice,
2334 struct iris_uncompiled_shader *ish,
2335 gl_shader_stage stage)
2336 {
2337 uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
2338 const uint64_t nos = ish ? ish->nos : 0;
2339
2340 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2341 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2342
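   /* If the highest used texture unit changed, the stage's SAMPLER_STATE
    * table needs to be a different size, so flag it for re-upload.
    */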
2343 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2344 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2345 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
2346 }
2347
2348 ice->shaders.uncompiled[stage] = ish;
2349 ice->state.dirty |= dirty_bit;
2350
2351 /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
2352 * (or that they no longer need to do so).
2353 */
2354 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2355 if (nos & (1 << i))
2356 ice->state.dirty_for_nos[i] |= dirty_bit;
2357 else
2358 ice->state.dirty_for_nos[i] &= ~dirty_bit;
2359 }
2360 }
2361
2362 static void
2363 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2364 {
2365 struct iris_context *ice = (struct iris_context *)ctx;
2366 struct iris_uncompiled_shader *new_ish = state;
2367
2368 if (new_ish &&
2369 ice->state.window_space_position !=
2370 new_ish->nir->info.vs.window_space_position) {
2371 ice->state.window_space_position =
2372 new_ish->nir->info.vs.window_space_position;
2373
2374 ice->state.dirty |= IRIS_DIRTY_CLIP |
2375 IRIS_DIRTY_RASTER |
2376 IRIS_DIRTY_CC_VIEWPORT;
2377 }
2378
2379 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2380 }
2381
2382 static void
2383 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2384 {
2385 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2386 }
2387
2388 static void
2389 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2390 {
2391 struct iris_context *ice = (struct iris_context *)ctx;
2392
2393 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2394 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2395 ice->state.dirty |= IRIS_DIRTY_URB;
2396
2397 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2398 }
2399
2400 static void
2401 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2402 {
2403 struct iris_context *ice = (struct iris_context *)ctx;
2404
2405 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2406 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2407 ice->state.dirty |= IRIS_DIRTY_URB;
2408
2409 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2410 }
2411
2412 static void
2413 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2414 {
2415 struct iris_context *ice = (struct iris_context *) ctx;
2416 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2417 const struct gen_device_info *devinfo = &screen->devinfo;
2418 struct iris_uncompiled_shader *old_ish =
2419 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2420 struct iris_uncompiled_shader *new_ish = state;
2421
2422 const unsigned color_bits =
2423 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2424 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2425
2426 /* Fragment shader outputs influence HasWriteableRT */
2427 if (!old_ish || !new_ish ||
2428 (old_ish->nir->info.outputs_written & color_bits) !=
2429 (new_ish->nir->info.outputs_written & color_bits))
2430 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2431
2432 if (devinfo->gen == 8)
2433 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2434
2435 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2436 }
2437
2438 static void
2439 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2440 {
2441 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2442 }
2443
2444 void
2445 iris_init_program_functions(struct pipe_context *ctx)
2446 {
2447 ctx->create_vs_state = iris_create_vs_state;
2448 ctx->create_tcs_state = iris_create_tcs_state;
2449 ctx->create_tes_state = iris_create_tes_state;
2450 ctx->create_gs_state = iris_create_gs_state;
2451 ctx->create_fs_state = iris_create_fs_state;
2452 ctx->create_compute_state = iris_create_compute_state;
2453
2454 ctx->delete_vs_state = iris_delete_vs_state;
2455 ctx->delete_tcs_state = iris_delete_tcs_state;
2456 ctx->delete_tes_state = iris_delete_tes_state;
2457 ctx->delete_gs_state = iris_delete_gs_state;
2458 ctx->delete_fs_state = iris_delete_fs_state;
2459 ctx->delete_compute_state = iris_delete_cs_state;
2460
2461 ctx->bind_vs_state = iris_bind_vs_state;
2462 ctx->bind_tcs_state = iris_bind_tcs_state;
2463 ctx->bind_tes_state = iris_bind_tes_state;
2464 ctx->bind_gs_state = iris_bind_gs_state;
2465 ctx->bind_fs_state = iris_bind_fs_state;
2466 ctx->bind_compute_state = iris_bind_cs_state;
2467 }