iris: Implement ALT mode for ARB_{vertex,fragment}_program
[mesa.git] / src / gallium / drivers / iris / iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "compiler/nir/nir.h"
40 #include "compiler/nir/nir_builder.h"
41 #include "intel/compiler/brw_compiler.h"
42 #include "intel/compiler/brw_nir.h"
43 #include "iris_context.h"
44
45 #define ALL_SAMPLERS_XYZW .tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688
46 #define KEY_INIT .program_string_id = ish->program_id, ALL_SAMPLERS_XYZW
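/* 0x688 is the identity swizzle SWIZZLE_XYZW packed as four 3-bit
 * selectors: X=0, Y=1, Z=2, W=3, so (0 << 0) | (1 << 3) | (2 << 6) |
 * (3 << 9) == 0x688. As a rough sketch of the expansion,
 *
 *    struct brw_vs_prog_key key = { KEY_INIT };
 *
 * is equivalent to
 *
 *    struct brw_vs_prog_key key = {
 *       .program_string_id = ish->program_id,
 *       .tex.swizzles = { [0 ... MAX_SAMPLERS - 1] = 0x688 },
 *    };
 *
 * i.e. every sampler defaults to "no swizzling", and the key is tagged
 * with the program's unique ID.
 */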
47
48 static struct iris_compiled_shader *
49 iris_compile_vs(struct iris_context *, struct iris_uncompiled_shader *,
50 const struct brw_vs_prog_key *);
51 static struct iris_compiled_shader *
52 iris_compile_tcs(struct iris_context *, struct iris_uncompiled_shader *,
53 const struct brw_tcs_prog_key *);
54 static struct iris_compiled_shader *
55 iris_compile_tes(struct iris_context *, struct iris_uncompiled_shader *,
56 const struct brw_tes_prog_key *);
57 static struct iris_compiled_shader *
58 iris_compile_gs(struct iris_context *, struct iris_uncompiled_shader *,
59 const struct brw_gs_prog_key *);
60 static struct iris_compiled_shader *
61 iris_compile_fs(struct iris_context *, struct iris_uncompiled_shader *,
62 const struct brw_wm_prog_key *, struct brw_vue_map *);
63 static struct iris_compiled_shader *
64 iris_compile_cs(struct iris_context *, struct iris_uncompiled_shader *,
65 const struct brw_cs_prog_key *);
66
67
68 static unsigned
69 get_new_program_id(struct iris_screen *screen)
70 {
71 return p_atomic_inc_return(&screen->program_id);
72 }
73
74 /**
75 * An uncompiled, API-facing shader. This is the Gallium CSO for shaders.
76 * It primarily contains the NIR for the shader.
77 *
78 * Each API-facing shader can be compiled into multiple shader variants,
79 * based on non-orthogonal state dependencies, recorded in the shader key.
80 *
81 * See iris_compiled_shader, which represents a compiled shader variant.
82 */
83 struct iris_uncompiled_shader {
84 nir_shader *nir;
85
86 struct pipe_stream_output_info stream_output;
87
88 unsigned program_id;
89
90 /** Bitfield of (1 << IRIS_NOS_*) flags. */
91 unsigned nos;
92
93 /** Have any shader variants been compiled yet? */
94 bool compiled_once;
95 };
96
97 static nir_ssa_def *
98 get_aoa_deref_offset(nir_builder *b,
99 nir_deref_instr *deref,
100 unsigned elem_size)
101 {
102 unsigned array_size = elem_size;
103 nir_ssa_def *offset = nir_imm_int(b, 0);
104
105 while (deref->deref_type != nir_deref_type_var) {
106 assert(deref->deref_type == nir_deref_type_array);
107
108 /* This level's element size is the previous level's array size */
109 assert(deref->arr.index.is_ssa);
110 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
111 offset = nir_iadd(b, offset,
112 nir_imul(b, index, nir_imm_int(b, array_size)));
113
114 deref = nir_deref_instr_parent(deref);
115 assert(glsl_type_is_array(deref->type));
116 array_size *= glsl_get_length(deref->type);
117 }
118
119 /* Accessing an invalid surface index with the dataport can result in a
120 * hang. According to the spec "if the index used to select an individual
121 * element is negative or greater than or equal to the size of the array,
122 * the results of the operation are undefined but may not lead to
123 * termination" -- which is one of the possible outcomes of the hang.
124 * Clamp the index to prevent access outside of the array bounds.
125 */
126 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
127 }
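/* A worked example: for a declaration like
 *
 *    uniform image2D img[3][2];
 *
 * accessed as img[i][j] with elem_size == 1, the loop starts at the
 * innermost array deref: offset = j * 1, then array_size becomes 2 (the
 * length of img[i]); next offset += i * 2, and array_size becomes 6.
 * The result is the flattened index i * 2 + j, clamped to 6 - 1 = 5.
 */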
128
129 static void
130 iris_lower_storage_image_derefs(nir_shader *nir)
131 {
132 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
133
134 nir_builder b;
135 nir_builder_init(&b, impl);
136
137 nir_foreach_block(block, impl) {
138 nir_foreach_instr_safe(instr, block) {
139 if (instr->type != nir_instr_type_intrinsic)
140 continue;
141
142 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
143 switch (intrin->intrinsic) {
144 case nir_intrinsic_image_deref_load:
145 case nir_intrinsic_image_deref_store:
146 case nir_intrinsic_image_deref_atomic_add:
147 case nir_intrinsic_image_deref_atomic_min:
148 case nir_intrinsic_image_deref_atomic_max:
149 case nir_intrinsic_image_deref_atomic_and:
150 case nir_intrinsic_image_deref_atomic_or:
151 case nir_intrinsic_image_deref_atomic_xor:
152 case nir_intrinsic_image_deref_atomic_exchange:
153 case nir_intrinsic_image_deref_atomic_comp_swap:
154 case nir_intrinsic_image_deref_size:
155 case nir_intrinsic_image_deref_samples:
156 case nir_intrinsic_image_deref_load_raw_intel:
157 case nir_intrinsic_image_deref_store_raw_intel: {
158 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
159 nir_variable *var = nir_deref_instr_get_variable(deref);
160
161 b.cursor = nir_before_instr(&intrin->instr);
162 nir_ssa_def *index =
163 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
164 get_aoa_deref_offset(&b, deref, 1));
165 brw_nir_rewrite_image_intrinsic(intrin, index);
166 break;
167 }
168
169 default:
170 break;
171 }
172 }
173 }
174 }
175
176 // XXX: need unify_interfaces() at link time...
177
178 /**
179 * Fix an uncompiled shader's stream output info.
180 *
181 * Core Gallium stores output->register_index as a "slot" number, where
182 * slots are assigned consecutively to all outputs in info->outputs_written.
183 * This naive packing of outputs doesn't work for us - we too have slots,
184 * but the layout is defined by the VUE map, which we won't have until we
185 * compile a specific shader variant. So, we remap these and simply store
186 * VARYING_SLOT_* in our copy's output->register_index fields.
187 *
188 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
189 * components of our VUE header. See brw_vue_map.c for the layout.
190 */
191 static void
192 update_so_info(struct pipe_stream_output_info *so_info,
193 uint64_t outputs_written)
194 {
195 uint8_t reverse_map[64] = {};
196 unsigned slot = 0;
197 while (outputs_written) {
198 reverse_map[slot++] = u_bit_scan64(&outputs_written);
199 }
200
201 for (unsigned i = 0; i < so_info->num_outputs; i++) {
202 struct pipe_stream_output *output = &so_info->output[i];
203
204 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
205 output->register_index = reverse_map[output->register_index];
206
207 /* The VUE header contains three scalar fields packed together:
208 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
209 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
210 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
211 */
212 switch (output->register_index) {
213 case VARYING_SLOT_LAYER:
214 assert(output->num_components == 1);
215 output->register_index = VARYING_SLOT_PSIZ;
216 output->start_component = 1;
217 break;
218 case VARYING_SLOT_VIEWPORT:
219 assert(output->num_components == 1);
220 output->register_index = VARYING_SLOT_PSIZ;
221 output->start_component = 2;
222 break;
223 case VARYING_SLOT_PSIZ:
224 assert(output->num_components == 1);
225 output->start_component = 3;
226 break;
227 }
228
229 //info->outputs_written |= 1ull << output->register_index;
230 }
231 }
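/* For example, if outputs_written contains only VARYING_SLOT_POS,
 * VARYING_SLOT_PSIZ, and VARYING_SLOT_VAR0, u_bit_scan64() consumes the
 * bits lowest-first, so reverse_map[0..2] = { POS, PSIZ, VAR0 }; a
 * stream output recorded against condensed slot 2 is thus remapped to
 * VARYING_SLOT_VAR0, after which the VUE header fixups apply.
 */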
232
233 /**
234 * The pipe->create_[stage]_state() driver hooks.
235 *
236 * Performs basic NIR preprocessing, records any state dependencies, and
237 * returns an iris_uncompiled_shader as the Gallium CSO.
238 *
239 * Actual shader compilation to assembly happens later, at first use.
240 */
241 static void *
242 iris_create_uncompiled_shader(struct pipe_context *ctx,
243 nir_shader *nir,
244 const struct pipe_stream_output_info *so_info)
245 {
246 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
247 const struct gen_device_info *devinfo = &screen->devinfo;
248
249 struct iris_uncompiled_shader *ish =
250 calloc(1, sizeof(struct iris_uncompiled_shader));
251 if (!ish)
252 return NULL;
253
254 nir = brw_preprocess_nir(screen->compiler, nir);
255
256 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
257 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
258
259 ish->program_id = get_new_program_id(screen);
260 ish->nir = nir;
261 if (so_info) {
262 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
263 update_so_info(&ish->stream_output, nir->info.outputs_written);
264 }
265
266 return ish;
267 }
268
269 static struct iris_uncompiled_shader *
270 iris_create_shader_state(struct pipe_context *ctx,
271 const struct pipe_shader_state *state)
272 {
273 assert(state->type == PIPE_SHADER_IR_NIR);
274
275 return iris_create_uncompiled_shader(ctx, state->ir.nir,
276 &state->stream_output);
277 }
278
279 static void *
280 iris_create_vs_state(struct pipe_context *ctx,
281 const struct pipe_shader_state *state)
282 {
283 struct iris_context *ice = (void *) ctx;
284 struct iris_screen *screen = (void *) ctx->screen;
285 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
286
287 /* User clip planes */
288 if (ish->nir->info.clip_distance_array_size == 0)
289 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
290
291 if (screen->precompile) {
292 struct brw_vs_prog_key key = { KEY_INIT };
293
294 iris_compile_vs(ice, ish, &key);
295 }
296
297 return ish;
298 }
299
300 static void *
301 iris_create_tcs_state(struct pipe_context *ctx,
302 const struct pipe_shader_state *state)
303 {
304 struct iris_context *ice = (void *) ctx;
305 struct iris_screen *screen = (void *) ctx->screen;
306 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
307 struct shader_info *info = &ish->nir->info;
308
309 // XXX: NOS?
310
311 if (screen->precompile) {
312 const unsigned _GL_TRIANGLES = 0x0004;
313 struct brw_tcs_prog_key key = {
314 KEY_INIT,
315 // XXX: make sure the linker fills this out from the TES...
316 .tes_primitive_mode =
317 info->tess.primitive_mode ? info->tess.primitive_mode
318 : _GL_TRIANGLES,
319 .outputs_written = info->outputs_written,
320 .patch_outputs_written = info->patch_outputs_written,
321 };
322
323 iris_compile_tcs(ice, ish, &key);
324 }
325
326 return ish;
327 }
328
329 static void *
330 iris_create_tes_state(struct pipe_context *ctx,
331 const struct pipe_shader_state *state)
332 {
333 struct iris_context *ice = (void *) ctx;
334 struct iris_screen *screen = (void *) ctx->screen;
335 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
336 struct shader_info *info = &ish->nir->info;
337
338 // XXX: NOS?
339
340 if (screen->precompile) {
341 struct brw_tes_prog_key key = {
342 KEY_INIT,
343 // XXX: not ideal, need TCS output/TES input unification
344 .inputs_read = info->inputs_read,
345 .patch_inputs_read = info->patch_inputs_read,
346 };
347
348 iris_compile_tes(ice, ish, &key);
349 }
350
351 return ish;
352 }
353
354 static void *
355 iris_create_gs_state(struct pipe_context *ctx,
356 const struct pipe_shader_state *state)
357 {
358 struct iris_context *ice = (void *) ctx;
359 struct iris_screen *screen = (void *) ctx->screen;
360 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
361
362 // XXX: NOS?
363
364 if (screen->precompile) {
365 struct brw_gs_prog_key key = { KEY_INIT };
366
367 iris_compile_gs(ice, ish, &key);
368 }
369
370 return ish;
371 }
372
373 static void *
374 iris_create_fs_state(struct pipe_context *ctx,
375 const struct pipe_shader_state *state)
376 {
377 struct iris_context *ice = (void *) ctx;
378 struct iris_screen *screen = (void *) ctx->screen;
379 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
380 struct shader_info *info = &ish->nir->info;
381
382 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
383 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
384 (1ull << IRIS_NOS_RASTERIZER) |
385 (1ull << IRIS_NOS_BLEND);
386
387 /* The program key needs the VUE map if there are > 16 inputs */
388 if (util_bitcount64(ish->nir->info.inputs_read &
389 BRW_FS_VARYING_INPUT_MASK) > 16) {
390 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
391 }
392
393 if (screen->precompile) {
394 const uint64_t color_outputs = info->outputs_written &
395 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
396 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
397 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
398
399 bool can_rearrange_varyings =
400 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
401
402 struct brw_wm_prog_key key = {
403 KEY_INIT,
404 .nr_color_regions = util_bitcount(color_outputs),
405 .coherent_fb_fetch = true,
406 .input_slots_valid =
407 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
408 };
409
410 iris_compile_fs(ice, ish, &key, NULL);
411 }
412
413 return ish;
414 }
415
416 static void *
417 iris_create_compute_state(struct pipe_context *ctx,
418 const struct pipe_compute_state *state)
419 {
420 assert(state->ir_type == PIPE_SHADER_IR_NIR);
421
422 struct iris_context *ice = (void *) ctx;
423 struct iris_screen *screen = (void *) ctx->screen;
424 struct iris_uncompiled_shader *ish =
425 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
426
427 // XXX: disallow more than 64KB of shared variables
428
429 if (screen->precompile) {
430 struct brw_cs_prog_key key = { KEY_INIT };
431
432 iris_compile_cs(ice, ish, &key);
433 }
434
435 return ish;
436 }
437
438 /**
439 * The pipe->delete_[stage]_state() driver hooks.
440 *
441 * Frees the iris_uncompiled_shader.
442 */
443 static void
444 iris_delete_shader_state(struct pipe_context *ctx, void *state)
445 {
446 struct iris_uncompiled_shader *ish = state;
447
448 ralloc_free(ish->nir);
449 free(ish);
450 }
451
452 /**
453 * The pipe->bind_[stage]_state() driver hook.
454 *
455 * Binds an uncompiled shader as the current one for a particular stage.
456 * Updates dirty tracking to account for the shader's NOS.
457 */
458 static void
459 bind_state(struct iris_context *ice,
460 struct iris_uncompiled_shader *ish,
461 gl_shader_stage stage)
462 {
463 uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
464 const uint64_t nos = ish ? ish->nos : 0;
465
466 ice->shaders.uncompiled[stage] = ish;
467 ice->state.dirty |= dirty_bit;
468
469 /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
470 * (or that they no longer need to do so).
471 */
472 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
473 if (nos & (1 << i))
474 ice->state.dirty_for_nos[i] |= dirty_bit;
475 else
476 ice->state.dirty_for_nos[i] &= ~dirty_bit;
477 }
478 }
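/* A sketch of how the dirty_for_nos[] bits are meant to be consumed
 * (assuming the state module follows the comment above): when, say, a new
 * blend CSO is bound, it can simply do
 *
 *    ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
 *
 * which flags IRIS_DIRTY_UNCOMPILED_FS only if the bound fragment shader
 * actually depends on blend state, triggering a variant lookup on the
 * next draw.
 */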
479
480 static void
481 iris_bind_vs_state(struct pipe_context *ctx, void *state)
482 {
483 bind_state((void *) ctx, state, MESA_SHADER_VERTEX);
484 }
485
486 static void
487 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
488 {
489 bind_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
490 }
491
492 static void
493 iris_bind_tes_state(struct pipe_context *ctx, void *state)
494 {
495 struct iris_context *ice = (struct iris_context *)ctx;
496
497 /* Enabling/disabling optional stages requires a URB reconfiguration. */
498 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
499 ice->state.dirty |= IRIS_DIRTY_URB;
500
501 bind_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
502 }
503
504 static void
505 iris_bind_gs_state(struct pipe_context *ctx, void *state)
506 {
507 struct iris_context *ice = (struct iris_context *)ctx;
508
509 /* Enabling/disabling optional stages requires a URB reconfiguration. */
510 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
511 ice->state.dirty |= IRIS_DIRTY_URB;
512
513 bind_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
514 }
515
516 static void
517 iris_bind_fs_state(struct pipe_context *ctx, void *state)
518 {
519 bind_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
520 }
521
522 static void
523 iris_bind_cs_state(struct pipe_context *ctx, void *state)
524 {
525 bind_state((void *) ctx, state, MESA_SHADER_COMPUTE);
526 }
527
528 /**
529 * Sets up the starting offsets for the groups of binding table entries
530 * common to all pipeline stages.
531 *
532 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
533  * unused, and also so that adding small offsets to them will still
534  * trigger our asserts that surface indices are < BRW_MAX_SURFACES.
535 */
536 static uint32_t
537 assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
538 const struct nir_shader *nir,
539 struct brw_stage_prog_data *prog_data,
540 uint32_t next_binding_table_offset,
541 unsigned num_system_values,
542 unsigned num_cbufs)
543 {
544 const struct shader_info *info = &nir->info;
545
546 if (info->num_textures) {
547 prog_data->binding_table.texture_start = next_binding_table_offset;
548 prog_data->binding_table.gather_texture_start = next_binding_table_offset;
549 next_binding_table_offset += info->num_textures;
550 } else {
551 prog_data->binding_table.texture_start = 0xd0d0d0d0;
552 prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
553 }
554
555 if (info->num_images) {
556 prog_data->binding_table.image_start = next_binding_table_offset;
557 next_binding_table_offset += info->num_images;
558 } else {
559 prog_data->binding_table.image_start = 0xd0d0d0d0;
560 }
561
562 if (num_cbufs) {
563 //assert(info->num_ubos <= BRW_MAX_UBO);
564 prog_data->binding_table.ubo_start = next_binding_table_offset;
565 next_binding_table_offset += num_cbufs;
566 } else {
567 prog_data->binding_table.ubo_start = 0xd0d0d0d0;
568 }
569
570 if (info->num_ssbos || info->num_abos) {
571 prog_data->binding_table.ssbo_start = next_binding_table_offset;
572 // XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
573 next_binding_table_offset += IRIS_MAX_ABOS + info->num_ssbos;
574 } else {
575 prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
576 }
577
578 prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
579
580 /* Plane 0 is just the regular texture section */
581 prog_data->binding_table.plane_start[0] = prog_data->binding_table.texture_start;
582
583 prog_data->binding_table.plane_start[1] = next_binding_table_offset;
584 next_binding_table_offset += info->num_textures;
585
586 prog_data->binding_table.plane_start[2] = next_binding_table_offset;
587 next_binding_table_offset += info->num_textures;
588
589 /* Set the binding table size */
590 prog_data->binding_table.size_bytes = next_binding_table_offset * 4;
591
592 return next_binding_table_offset;
593 }
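/* An example layout (a sketch, assuming IRIS_MAX_ABOS == 16 per the
 * "wasting 16 binding table slots for ABOs" note above): a fragment
 * shader with one render target, two textures, one image, two constant
 * buffers, and one SSBO, starting from next_binding_table_offset == 1,
 * would get:
 *
 *    surface 0:          render target
 *    texture_start  = 1  (surfaces 1-2, also plane_start[0])
 *    image_start    = 3
 *    ubo_start      = 4  (surfaces 4-5)
 *    ssbo_start     = 6  (16 ABO slots + 1 SSBO, surfaces 6-22)
 *    plane_start[1] = 23, plane_start[2] = 25
 *
 * for a total binding table of 27 entries (size_bytes == 108).
 */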
594
595 static void
596 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
597 unsigned offset, unsigned n)
598 {
599 assert(offset % sizeof(uint32_t) == 0);
600
601 for (unsigned i = 0; i < n; ++i)
602 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
603
604 for (unsigned i = n; i < 4; ++i)
605 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
606 }
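/* For example, setup_vec4_image_sysval(sysvals, 2,
 * offsetof(struct brw_image_param, offset), 2) fills sysvals[0..1] with
 * BRW_PARAM_IMAGE(2, ...) entries and pads sysvals[2..3] with
 * BRW_PARAM_BUILTIN_ZERO, keeping every field vec4-aligned.
 */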
607
608 /**
609 * Associate NIR uniform variables with the prog_data->param[] mechanism
610 * used by the backend. Also, decide which UBOs we'd like to push in an
611 * ideal situation (though the backend can reduce this).
612 */
613 static void
614 iris_setup_uniforms(const struct brw_compiler *compiler,
615 void *mem_ctx,
616 nir_shader *nir,
617 struct brw_stage_prog_data *prog_data,
618 enum brw_param_builtin **out_system_values,
619 unsigned *out_num_system_values,
620 unsigned *out_num_cbufs)
621 {
622 const struct gen_device_info *devinfo = compiler->devinfo;
623
624 /* The intel compiler assumes that num_uniforms is in bytes. For
625 * scalar that means 4 bytes per uniform slot.
626 *
627 * Ref: brw_nir_lower_uniforms, type_size_scalar_bytes.
628 */
629 nir->num_uniforms *= 4;
630
631 const unsigned IRIS_MAX_SYSTEM_VALUES =
632 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
633 enum brw_param_builtin *system_values =
634 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
635 unsigned num_system_values = 0;
636
637 unsigned patch_vert_idx = -1;
638 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
639 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
640 memset(ucp_idx, -1, sizeof(ucp_idx));
641 memset(img_idx, -1, sizeof(img_idx));
642
643 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
644
645 nir_builder b;
646 nir_builder_init(&b, impl);
647
648 b.cursor = nir_before_block(nir_start_block(impl));
649 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
650
651 /* Turn system value intrinsics into uniforms */
652 nir_foreach_block(block, impl) {
653 nir_foreach_instr_safe(instr, block) {
654 if (instr->type != nir_instr_type_intrinsic)
655 continue;
656
657 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
658 nir_ssa_def *offset;
659
660 switch (intrin->intrinsic) {
661 case nir_intrinsic_load_user_clip_plane: {
662 unsigned ucp = nir_intrinsic_ucp_id(intrin);
663
664 if (ucp_idx[ucp] == -1) {
665 ucp_idx[ucp] = num_system_values;
666 num_system_values += 4;
667 }
668
669 for (int i = 0; i < 4; i++) {
670 system_values[ucp_idx[ucp] + i] =
671 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
672 }
673
674 b.cursor = nir_before_instr(instr);
675 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
676 break;
677 }
678 case nir_intrinsic_load_patch_vertices_in:
679 if (patch_vert_idx == -1)
680 patch_vert_idx = num_system_values++;
681
682 system_values[patch_vert_idx] =
683 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
684
685 b.cursor = nir_before_instr(instr);
686 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
687 break;
688 case nir_intrinsic_image_deref_load_param_intel: {
689 assert(devinfo->gen < 9);
690 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
691 nir_variable *var = nir_deref_instr_get_variable(deref);
692
693 /* XXX: var->data.binding is not set properly. We need to run
694 * some form of gl_nir_lower_samplers_as_deref() to get it.
695 * This breaks tests which use more than one image.
696 */
697 if (img_idx[var->data.binding] == -1) {
698 /* GL only allows arrays of arrays of images. */
699 assert(glsl_type_is_image(glsl_without_array(var->type)));
700 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
701
702 for (int i = 0; i < num_images; i++) {
703 const unsigned img = var->data.binding + i;
704
705 img_idx[img] = num_system_values;
706 num_system_values += BRW_IMAGE_PARAM_SIZE;
707
708 uint32_t *img_sv = &system_values[img_idx[img]];
709
710 setup_vec4_image_sysval(
711 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
712 offsetof(struct brw_image_param, offset), 2);
713 setup_vec4_image_sysval(
714 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
715 offsetof(struct brw_image_param, size), 3);
716 setup_vec4_image_sysval(
717 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
718 offsetof(struct brw_image_param, stride), 4);
719 setup_vec4_image_sysval(
720 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
721 offsetof(struct brw_image_param, tiling), 3);
722 setup_vec4_image_sysval(
723 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
724 offsetof(struct brw_image_param, swizzling), 2);
725 }
726 }
727
728 b.cursor = nir_before_instr(instr);
729 offset = nir_iadd(&b,
730 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
731 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
732 nir_intrinsic_base(intrin) * 16));
733 break;
734 }
735 default:
736 continue;
737 }
738
739 unsigned comps = nir_intrinsic_dest_components(intrin);
740
741 nir_intrinsic_instr *load =
742 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
743 load->num_components = comps;
744 load->src[0] = nir_src_for_ssa(temp_ubo_name);
745 load->src[1] = nir_src_for_ssa(offset);
746 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
747 nir_builder_instr_insert(&b, &load->instr);
748 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
749 nir_src_for_ssa(&load->dest.ssa));
750 nir_instr_remove(instr);
751 }
752 }
753
754 nir_validate_shader(nir, "before remapping");
755
756 /* Place the new params at the front of constant buffer 0. */
757 if (num_system_values > 0) {
758 nir->num_uniforms += num_system_values * sizeof(uint32_t);
759
760 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
761 num_system_values);
762
763 nir_foreach_block(block, impl) {
764 nir_foreach_instr_safe(instr, block) {
765 if (instr->type != nir_instr_type_intrinsic)
766 continue;
767
768 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
769
770 if (load->intrinsic != nir_intrinsic_load_ubo)
771 continue;
772
773 b.cursor = nir_before_instr(instr);
774
775 assert(load->src[0].is_ssa);
776
777 if (load->src[0].ssa == temp_ubo_name) {
778 nir_instr_rewrite_src(instr, &load->src[0],
779 nir_src_for_ssa(nir_imm_int(&b, 0)));
780 } else if (nir_src_as_uint(load->src[0]) == 0) {
781 nir_ssa_def *offset =
782 nir_iadd(&b, load->src[1].ssa,
783 nir_imm_int(&b, 4 * num_system_values));
784 nir_instr_rewrite_src(instr, &load->src[1],
785 nir_src_for_ssa(offset));
786 }
787 }
788 }
789
790 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
791 nir_opt_constant_folding(nir);
792 } else {
793 ralloc_free(system_values);
794 system_values = NULL;
795 }
796
797 nir_validate_shader(nir, "after remap");
798
799 if (nir->info.stage != MESA_SHADER_COMPUTE)
800 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
801
802    /* We don't use params[], but fs_visitor::nir_setup_uniforms() has
803     * assertions about it for compute shaders, so go ahead and make some fake ones
804 * which the backend will dead code eliminate.
805 */
806 prog_data->nr_params = nir->num_uniforms / 4;
807 prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);
808
809    /* System values and uniforms are stored in constant buffer 0, so the
810     * user-facing UBOs start at index one. If any constant buffer is
811     * needed at all, constant buffer 0 will be needed, so account for it.
812 */
813 unsigned num_cbufs = nir->info.num_ubos;
814 if (num_cbufs || num_system_values || nir->num_uniforms)
815 num_cbufs++;
816
817 *out_system_values = system_values;
818 *out_num_system_values = num_system_values;
819 *out_num_cbufs = num_cbufs;
820 }
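/* The resulting layout of constant buffer 0 is (as a sketch):
 *
 *    [ 4 * num_system_values bytes of system values ]
 *    [ the original user uniforms after that        ]
 *
 * which is why user loads from UBO index 0 had their offsets bumped by
 * 4 * num_system_values above, while temp_ubo_name loads (the system
 * value loads generated here) were pointed at index 0 unshifted.
 */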
821
822 /**
823 * Compile a vertex shader, and upload the assembly.
824 */
825 static struct iris_compiled_shader *
826 iris_compile_vs(struct iris_context *ice,
827 struct iris_uncompiled_shader *ish,
828 const struct brw_vs_prog_key *key)
829 {
830 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
831 const struct brw_compiler *compiler = screen->compiler;
832 const struct gen_device_info *devinfo = &screen->devinfo;
833 void *mem_ctx = ralloc_context(NULL);
834 struct brw_vs_prog_data *vs_prog_data =
835 rzalloc(mem_ctx, struct brw_vs_prog_data);
836 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
837 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
838 enum brw_param_builtin *system_values;
839 unsigned num_system_values;
840 unsigned num_cbufs;
841
842 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
843
844 if (key->nr_userclip_plane_consts) {
845 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
846 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true);
847 nir_lower_io_to_temporaries(nir, impl, true, false);
848 nir_lower_global_vars_to_local(nir);
849 nir_lower_vars_to_ssa(nir);
850 nir_shader_gather_info(nir, impl);
851 }
852
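   /* Shaders built from ARB_vertex_program assembly arrive with an
    * "ARB"-prefixed name, which is what the check below keys on.
    * use_alt_mode selects the EU's ALT floating-point mode, giving the
    * legacy (non-IEEE) math behavior ARB assembly programs require;
    * this is the "ALT mode" this change implements.
    */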
853 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
854 prog_data->use_alt_mode = true;
855
856 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
857 &num_system_values, &num_cbufs);
858
859 assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
860 num_system_values, num_cbufs);
861
862 brw_compute_vue_map(devinfo,
863 &vue_prog_data->vue_map, nir->info.outputs_written,
864 nir->info.separate_shader);
865
866 /* Don't tell the backend about our clip plane constants, we've already
867 * lowered them in NIR and we don't want it doing it again.
868 */
869 struct brw_vs_prog_key key_no_ucp = *key;
870 key_no_ucp.nr_userclip_plane_consts = 0;
871
872 char *error_str = NULL;
873 const unsigned *program =
874 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &key_no_ucp, vs_prog_data,
875 nir, -1, &error_str);
876 if (program == NULL) {
877 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
878 ralloc_free(mem_ctx);
879       return NULL;
880 }
881
882 uint32_t *so_decls =
883 ice->vtbl.create_so_decl_list(&ish->stream_output,
884 &vue_prog_data->vue_map);
885
886 struct iris_compiled_shader *shader =
887 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
888 prog_data, so_decls, system_values, num_system_values,
889 num_cbufs);
890
891 if (ish->compiled_once) {
892 perf_debug(&ice->dbg, "Recompiling vertex shader\n");
893 } else {
894 ish->compiled_once = true;
895 }
896
897 ralloc_free(mem_ctx);
898 return shader;
899 }
900
901 /**
902 * Update the current vertex shader variant.
903 *
904 * Fill out the key, look in the cache, compile and bind if needed.
905 */
906 static void
907 iris_update_compiled_vs(struct iris_context *ice)
908 {
909 struct iris_uncompiled_shader *ish =
910 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
911
912 struct brw_vs_prog_key key = { KEY_INIT };
913 ice->vtbl.populate_vs_key(ice, &ish->nir->info, &key);
914
915 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
916 struct iris_compiled_shader *shader =
917 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
918
919 if (!shader)
920 shader = iris_compile_vs(ice, ish, &key);
921
922 if (old != shader) {
923 ice->shaders.prog[IRIS_CACHE_VS] = shader;
924 ice->state.dirty |= IRIS_DIRTY_VS |
925 IRIS_DIRTY_BINDINGS_VS |
926 IRIS_DIRTY_CONSTANTS_VS |
927 IRIS_DIRTY_VF_SGVS;
928 }
929 }
930
931 /**
932 * Get the shader_info for a given stage, or NULL if the stage is disabled.
933 */
934 const struct shader_info *
935 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
936 {
937 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
938
939 if (!ish)
940 return NULL;
941
942 const nir_shader *nir = ish->nir;
943 return &nir->info;
944 }
945
946 /**
947 * Get the union of TCS output and TES input slots.
948 *
949 * TCS and TES need to agree on a common URB entry layout. In particular,
950 * the data for all patch vertices is stored in a single URB entry (unlike
951 * GS which has one entry per input vertex). This means that per-vertex
952 * array indexing needs a stride.
953 *
954 * SSO requires locations to match, but doesn't require the number of
955 * outputs/inputs to match (in fact, the TCS often has extra outputs).
956 * So, we need to take the extra step of unifying these on the fly.
957 */
958 static void
959 get_unified_tess_slots(const struct iris_context *ice,
960 uint64_t *per_vertex_slots,
961 uint32_t *per_patch_slots)
962 {
963 const struct shader_info *tcs =
964 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
965 const struct shader_info *tes =
966 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
967
968 *per_vertex_slots = tes->inputs_read;
969 *per_patch_slots = tes->patch_inputs_read;
970
971 if (tcs) {
972 *per_vertex_slots |= tcs->outputs_written;
973 *per_patch_slots |= tcs->patch_outputs_written;
974 }
975 }
976
977 /**
978 * Compile a tessellation control shader, and upload the assembly.
979 */
980 static struct iris_compiled_shader *
981 iris_compile_tcs(struct iris_context *ice,
982 struct iris_uncompiled_shader *ish,
983 const struct brw_tcs_prog_key *key)
984 {
985 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
986 const struct brw_compiler *compiler = screen->compiler;
987 const struct nir_shader_compiler_options *options =
988 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
989 const struct gen_device_info *devinfo = &screen->devinfo;
990 void *mem_ctx = ralloc_context(NULL);
991 struct brw_tcs_prog_data *tcs_prog_data =
992 rzalloc(mem_ctx, struct brw_tcs_prog_data);
993 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
994 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
995 enum brw_param_builtin *system_values = NULL;
996 unsigned num_system_values = 0;
997 unsigned num_cbufs;
998
999 nir_shader *nir;
1000
1001 if (ish) {
1002 nir = nir_shader_clone(mem_ctx, ish->nir);
1003
1004 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1005 &num_system_values, &num_cbufs);
1006 assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
1007 num_system_values, num_cbufs);
1008 } else {
1009 nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);
1010
1011 /* Reserve space for passing the default tess levels as constants. */
1012 prog_data->param = rzalloc_array(mem_ctx, uint32_t, 8);
1013 prog_data->nr_params = 8;
1014 prog_data->ubo_ranges[0].length = 1;
1015 }
1016
1017 char *error_str = NULL;
1018 const unsigned *program =
1019 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, key, tcs_prog_data, nir,
1020 -1, &error_str);
1021 if (program == NULL) {
1022 dbg_printf("Failed to compile control shader: %s\n", error_str);
1023 ralloc_free(mem_ctx);
1024       return NULL;
1025 }
1026
1027 struct iris_compiled_shader *shader =
1028 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1029 prog_data, NULL, system_values, num_system_values,
1030 num_cbufs);
1031
1032 if (ish) {
1033 if (ish->compiled_once) {
1034 perf_debug(&ice->dbg, "Recompiling tessellation control shader\n");
1035 } else {
1036 ish->compiled_once = true;
1037 }
1038 }
1039
1040 ralloc_free(mem_ctx);
1041 return shader;
1042 }
1043
1044 /**
1045 * Update the current tessellation control shader variant.
1046 *
1047 * Fill out the key, look in the cache, compile and bind if needed.
1048 */
1049 static void
1050 iris_update_compiled_tcs(struct iris_context *ice)
1051 {
1052 struct iris_uncompiled_shader *tcs =
1053 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1054
1055 const struct shader_info *tes_info =
1056 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1057 struct brw_tcs_prog_key key = {
1058 ALL_SAMPLERS_XYZW,
1059 .program_string_id = tcs ? tcs->program_id : 0,
1060 .tes_primitive_mode = tes_info->tess.primitive_mode,
1061 .input_vertices = ice->state.vertices_per_patch,
1062 };
1063 get_unified_tess_slots(ice, &key.outputs_written,
1064 &key.patch_outputs_written);
1065 ice->vtbl.populate_tcs_key(ice, &key);
1066
1067 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1068 struct iris_compiled_shader *shader =
1069 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1070
1071 if (!shader)
1072 shader = iris_compile_tcs(ice, tcs, &key);
1073
1074 if (old != shader) {
1075 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1076 ice->state.dirty |= IRIS_DIRTY_TCS |
1077 IRIS_DIRTY_BINDINGS_TCS |
1078 IRIS_DIRTY_CONSTANTS_TCS;
1079 }
1080 }
1081
1082 /**
1083 * Compile a tessellation evaluation shader, and upload the assembly.
1084 */
1085 static struct iris_compiled_shader *
1086 iris_compile_tes(struct iris_context *ice,
1087 struct iris_uncompiled_shader *ish,
1088 const struct brw_tes_prog_key *key)
1089 {
1090 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1091 const struct brw_compiler *compiler = screen->compiler;
1092 const struct gen_device_info *devinfo = &screen->devinfo;
1093 void *mem_ctx = ralloc_context(NULL);
1094 struct brw_tes_prog_data *tes_prog_data =
1095 rzalloc(mem_ctx, struct brw_tes_prog_data);
1096 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1097 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1098 enum brw_param_builtin *system_values;
1099 unsigned num_system_values;
1100 unsigned num_cbufs;
1101
1102 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1103
1104 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1105 &num_system_values, &num_cbufs);
1106
1107 assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
1108 num_system_values, num_cbufs);
1109
1110 struct brw_vue_map input_vue_map;
1111 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1112 key->patch_inputs_read);
1113
1114 char *error_str = NULL;
1115 const unsigned *program =
1116 brw_compile_tes(compiler, &ice->dbg, mem_ctx, key, &input_vue_map,
1117 tes_prog_data, nir, NULL, -1, &error_str);
1118 if (program == NULL) {
1119 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1120 ralloc_free(mem_ctx);
1121       return NULL;
1122 }
1123
1124 uint32_t *so_decls =
1125 ice->vtbl.create_so_decl_list(&ish->stream_output,
1126 &vue_prog_data->vue_map);
1127
1128
1129 struct iris_compiled_shader *shader =
1130 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1131 prog_data, so_decls, system_values, num_system_values,
1132 num_cbufs);
1133
1134 if (ish->compiled_once) {
1135 perf_debug(&ice->dbg, "Recompiling tessellation evaluation shader\n");
1136 } else {
1137 ish->compiled_once = true;
1138 }
1139
1140 ralloc_free(mem_ctx);
1141 return shader;
1142 }
1143
1144 /**
1145 * Update the current tessellation evaluation shader variant.
1146 *
1147 * Fill out the key, look in the cache, compile and bind if needed.
1148 */
1149 static void
1150 iris_update_compiled_tes(struct iris_context *ice)
1151 {
1152 struct iris_uncompiled_shader *ish =
1153 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1154
1155 struct brw_tes_prog_key key = { KEY_INIT };
1156 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1157 ice->vtbl.populate_tes_key(ice, &key);
1158
1159 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1160 struct iris_compiled_shader *shader =
1161 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1162
1163 if (!shader)
1164 shader = iris_compile_tes(ice, ish, &key);
1165
1166 if (old != shader) {
1167 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1168 ice->state.dirty |= IRIS_DIRTY_TES |
1169 IRIS_DIRTY_BINDINGS_TES |
1170 IRIS_DIRTY_CONSTANTS_TES;
1171 }
1172 }
1173
1174 /**
1175 * Compile a geometry shader, and upload the assembly.
1176 */
1177 static struct iris_compiled_shader *
1178 iris_compile_gs(struct iris_context *ice,
1179 struct iris_uncompiled_shader *ish,
1180 const struct brw_gs_prog_key *key)
1181 {
1182 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1183 const struct brw_compiler *compiler = screen->compiler;
1184 const struct gen_device_info *devinfo = &screen->devinfo;
1185 void *mem_ctx = ralloc_context(NULL);
1186 struct brw_gs_prog_data *gs_prog_data =
1187 rzalloc(mem_ctx, struct brw_gs_prog_data);
1188 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1189 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1190 enum brw_param_builtin *system_values;
1191 unsigned num_system_values;
1192 unsigned num_cbufs;
1193
1194 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1195
1196 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1197 &num_system_values, &num_cbufs);
1198
1199 assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
1200 num_system_values, num_cbufs);
1201
1202 brw_compute_vue_map(devinfo,
1203 &vue_prog_data->vue_map, nir->info.outputs_written,
1204 nir->info.separate_shader);
1205
1206 char *error_str = NULL;
1207 const unsigned *program =
1208 brw_compile_gs(compiler, &ice->dbg, mem_ctx, key, gs_prog_data, nir,
1209 NULL, -1, &error_str);
1210 if (program == NULL) {
1211 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1212 ralloc_free(mem_ctx);
1213       return NULL;
1214 }
1215
1216 uint32_t *so_decls =
1217 ice->vtbl.create_so_decl_list(&ish->stream_output,
1218 &vue_prog_data->vue_map);
1219
1220 struct iris_compiled_shader *shader =
1221 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1222 prog_data, so_decls, system_values, num_system_values,
1223 num_cbufs);
1224
1225 if (ish->compiled_once) {
1226 perf_debug(&ice->dbg, "Recompiling geometry shader\n");
1227 } else {
1228 ish->compiled_once = true;
1229 }
1230
1231 ralloc_free(mem_ctx);
1232 return shader;
1233 }
1234
1235 /**
1236 * Update the current geometry shader variant.
1237 *
1238 * Fill out the key, look in the cache, compile and bind if needed.
1239 */
1240 static void
1241 iris_update_compiled_gs(struct iris_context *ice)
1242 {
1243 struct iris_uncompiled_shader *ish =
1244 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1245 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1246 struct iris_compiled_shader *shader = NULL;
1247
1248 if (ish) {
1249 struct brw_gs_prog_key key = { KEY_INIT };
1250 ice->vtbl.populate_gs_key(ice, &key);
1251
1252 shader =
1253 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1254
1255 if (!shader)
1256 shader = iris_compile_gs(ice, ish, &key);
1257 }
1258
1259 if (old != shader) {
1260 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1261 ice->state.dirty |= IRIS_DIRTY_GS |
1262 IRIS_DIRTY_BINDINGS_GS |
1263 IRIS_DIRTY_CONSTANTS_GS;
1264 }
1265 }
1266
1267 /**
1268 * Compile a fragment (pixel) shader, and upload the assembly.
1269 */
1270 static struct iris_compiled_shader *
1271 iris_compile_fs(struct iris_context *ice,
1272 struct iris_uncompiled_shader *ish,
1273 const struct brw_wm_prog_key *key,
1274 struct brw_vue_map *vue_map)
1275 {
1276 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1277 const struct brw_compiler *compiler = screen->compiler;
1278 const struct gen_device_info *devinfo = &screen->devinfo;
1279 void *mem_ctx = ralloc_context(NULL);
1280 struct brw_wm_prog_data *fs_prog_data =
1281 rzalloc(mem_ctx, struct brw_wm_prog_data);
1282 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1283 enum brw_param_builtin *system_values;
1284 unsigned num_system_values;
1285 unsigned num_cbufs;
1286
1287 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1288
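   /* As in iris_compile_vs(), "ARB"-prefixed names mark ARB assembly
    * programs, which need the EU's ALT floating-point mode.
    */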
1289 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
1290 prog_data->use_alt_mode = true;
1291
1292 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1293 &num_system_values, &num_cbufs);
1294
1295 assign_common_binding_table_offsets(devinfo, nir, prog_data,
1296 MAX2(key->nr_color_regions, 1),
1297 num_system_values, num_cbufs);
1298 char *error_str = NULL;
1299 const unsigned *program =
1300 brw_compile_fs(compiler, &ice->dbg, mem_ctx, key, fs_prog_data,
1301 nir, NULL, -1, -1, -1, true, false, vue_map, &error_str);
1302 if (program == NULL) {
1303 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1304 ralloc_free(mem_ctx);
1305       return NULL;
1306 }
1307
1308 struct iris_compiled_shader *shader =
1309 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1310 prog_data, NULL, system_values, num_system_values,
1311 num_cbufs);
1312
1313 if (ish->compiled_once) {
1314 perf_debug(&ice->dbg, "Recompiling fragment shader\n");
1315 } else {
1316 ish->compiled_once = true;
1317 }
1318
1319 ralloc_free(mem_ctx);
1320 return shader;
1321 }
1322
1323 /**
1324 * Update the current fragment shader variant.
1325 *
1326 * Fill out the key, look in the cache, compile and bind if needed.
1327 */
1328 static void
1329 iris_update_compiled_fs(struct iris_context *ice)
1330 {
1331 struct iris_uncompiled_shader *ish =
1332 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1333 struct brw_wm_prog_key key = { KEY_INIT };
1334 ice->vtbl.populate_fs_key(ice, &key);
1335
1336 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1337 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1338
1339 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1340 struct iris_compiled_shader *shader =
1341 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1342
1343 if (!shader)
1344 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1345
1346 if (old != shader) {
1347 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1348 // toggles. might be able to avoid flagging SBE too.
1349 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1350 ice->state.dirty |= IRIS_DIRTY_FS |
1351 IRIS_DIRTY_BINDINGS_FS |
1352 IRIS_DIRTY_CONSTANTS_FS |
1353 IRIS_DIRTY_WM |
1354 IRIS_DIRTY_CLIP |
1355 IRIS_DIRTY_SBE;
1356 }
1357 }
1358
1359 /**
1360  * Get the last enabled geometry (VUE) stage.
1361 *
1362 * This stage is the one which will feed stream output and the rasterizer.
1363 */
1364 static gl_shader_stage
1365 last_vue_stage(struct iris_context *ice)
1366 {
1367 if (ice->shaders.prog[MESA_SHADER_GEOMETRY])
1368 return MESA_SHADER_GEOMETRY;
1369
1370 if (ice->shaders.prog[MESA_SHADER_TESS_EVAL])
1371 return MESA_SHADER_TESS_EVAL;
1372
1373 return MESA_SHADER_VERTEX;
1374 }
1375
1376 /**
1377 * Update the last enabled stage's VUE map.
1378 *
1379  * When the output interface of the shader feeding the rasterizer
1380  * changes, we need to re-emit various packets.
1381 */
1382 static void
1383 update_last_vue_map(struct iris_context *ice,
1384 struct brw_stage_prog_data *prog_data)
1385 {
1386 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1387 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1388 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1389 const uint64_t changed_slots =
1390 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1391
1392 if (changed_slots & VARYING_BIT_VIEWPORT) {
1393 // XXX: could use ctx->Const.MaxViewports for old API efficiency
1394 ice->state.num_viewports =
1395 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1396 ice->state.dirty |= IRIS_DIRTY_CLIP |
1397 IRIS_DIRTY_SF_CL_VIEWPORT |
1398 IRIS_DIRTY_CC_VIEWPORT |
1399 IRIS_DIRTY_SCISSOR_RECT |
1400 IRIS_DIRTY_UNCOMPILED_FS |
1401 ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1402 // XXX: CC_VIEWPORT?
1403 }
1404
1405 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1406 ice->state.dirty |= IRIS_DIRTY_SBE;
1407 }
1408
1409 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1410 }
1411
1412 /**
1413 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1414 */
1415 static struct brw_vue_prog_data *
1416 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1417 {
1418 if (!ice->shaders.prog[stage])
1419 return NULL;
1420
1421 return (void *) ice->shaders.prog[stage]->prog_data;
1422 }
1423
1424 // XXX: iris_compiled_shaders are space-leaking :(
1425 // XXX: do remember to unbind them if deleting them.
1426
1427 /**
1428 * Update the current shader variants for the given state.
1429 *
1430 * This should be called on every draw call to ensure that the correct
1431 * shaders are bound. It will also flag any dirty state triggered by
1432 * swapping out those shaders.
1433 */
1434 void
1435 iris_update_compiled_shaders(struct iris_context *ice)
1436 {
1437 const uint64_t dirty = ice->state.dirty;
1438
1439 struct brw_vue_prog_data *old_prog_datas[4];
1440 if (!(dirty & IRIS_DIRTY_URB)) {
1441 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1442 old_prog_datas[i] = get_vue_prog_data(ice, i);
1443 }
1444
1445 if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
1446 struct iris_uncompiled_shader *tes =
1447 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1448 if (tes) {
1449 iris_update_compiled_tcs(ice);
1450 iris_update_compiled_tes(ice);
1451 } else {
1452 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1453 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1454 ice->state.dirty |=
1455 IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
1456 IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
1457 IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
1458 }
1459 }
1460
1461 if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
1462 iris_update_compiled_vs(ice);
1463 if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
1464 iris_update_compiled_gs(ice);
1465
1466 gl_shader_stage last_stage = last_vue_stage(ice);
1467 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1468 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1469 update_last_vue_map(ice, shader->prog_data);
1470 if (ice->state.streamout != shader->streamout) {
1471 ice->state.streamout = shader->streamout;
1472 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1473 }
1474
1475 if (ice->state.streamout_active) {
1476 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1477 struct iris_stream_output_target *so =
1478 (void *) ice->state.so_target[i];
1479 if (so)
1480 so->stride = ish->stream_output.stride[i];
1481 }
1482 }
1483
1484 if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
1485 iris_update_compiled_fs(ice);
1486 // ...
1487
1488    /* Changing shader interfaces may require a URB reconfiguration. */
1489 if (!(dirty & IRIS_DIRTY_URB)) {
1490 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1491 struct brw_vue_prog_data *old = old_prog_datas[i];
1492 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1493 if (!!old != !!new ||
1494 (new && new->urb_entry_size != old->urb_entry_size)) {
1495 ice->state.dirty |= IRIS_DIRTY_URB;
1496 break;
1497 }
1498 }
1499 }
1500 }
1501
1502 static struct iris_compiled_shader *
1503 iris_compile_cs(struct iris_context *ice,
1504 struct iris_uncompiled_shader *ish,
1505 const struct brw_cs_prog_key *key)
1506 {
1507 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1508 const struct brw_compiler *compiler = screen->compiler;
1509 const struct gen_device_info *devinfo = &screen->devinfo;
1510 void *mem_ctx = ralloc_context(NULL);
1511 struct brw_cs_prog_data *cs_prog_data =
1512 rzalloc(mem_ctx, struct brw_cs_prog_data);
1513 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1514 enum brw_param_builtin *system_values;
1515 unsigned num_system_values;
1516 unsigned num_cbufs;
1517
1518 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1519
1520 cs_prog_data->binding_table.work_groups_start = 0;
1521
1522 prog_data->total_shared = nir->info.cs.shared_size;
1523
1524 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1525 &num_system_values, &num_cbufs);
1526
1527 assign_common_binding_table_offsets(devinfo, nir, prog_data, 1,
1528 num_system_values, num_cbufs);
1529
1530 char *error_str = NULL;
1531 const unsigned *program =
1532 brw_compile_cs(compiler, &ice->dbg, mem_ctx, key, cs_prog_data,
1533 nir, -1, &error_str);
1534 if (program == NULL) {
1535 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1536 ralloc_free(mem_ctx);
1537       return NULL;
1538 }
1539
1540 struct iris_compiled_shader *shader =
1541 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1542 prog_data, NULL, system_values, num_system_values,
1543 num_cbufs);
1544
1545 if (ish->compiled_once) {
1546 perf_debug(&ice->dbg, "Recompiling compute shader\n");
1547 } else {
1548 ish->compiled_once = true;
1549 }
1550
1551 ralloc_free(mem_ctx);
1552 return shader;
1553 }
1554
1555 void
1556 iris_update_compiled_compute_shader(struct iris_context *ice)
1557 {
1558 struct iris_uncompiled_shader *ish =
1559 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
1560
1561 struct brw_cs_prog_key key = { KEY_INIT };
1562 ice->vtbl.populate_cs_key(ice, &key);
1563
1564 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
1565 struct iris_compiled_shader *shader =
1566 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
1567
1568 if (!shader)
1569 shader = iris_compile_cs(ice, ish, &key);
1570
1571 if (old != shader) {
1572 ice->shaders.prog[IRIS_CACHE_CS] = shader;
1573 ice->state.dirty |= IRIS_DIRTY_CS |
1574 IRIS_DIRTY_BINDINGS_CS |
1575 IRIS_DIRTY_CONSTANTS_CS;
1576 }
1577 }
1578
1579 void
1580 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
1581 uint32_t *dst)
1582 {
1583 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1584 assert(cs_prog_data->push.total.size > 0);
1585 assert(cs_prog_data->push.cross_thread.size == 0);
1586 assert(cs_prog_data->push.per_thread.dwords == 1);
1587 assert(prog_data->param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
1588 for (unsigned t = 0; t < cs_prog_data->threads; t++)
1589 dst[8 * t] = t;
1590 }
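/* Each thread's push constant data is padded out to a 32-byte register
 * (8 dwords), so dst[8 * t] = t writes the subgroup ID as the first and
 * only meaningful dword of thread t's block. With 4 threads, for
 * example: dst[0] = 0, dst[8] = 1, dst[16] = 2, dst[24] = 3.
 */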
1591
1592 /**
1593 * Allocate scratch BOs as needed for the given per-thread size and stage.
1594 */
1595 struct iris_bo *
1596 iris_get_scratch_space(struct iris_context *ice,
1597 unsigned per_thread_scratch,
1598 gl_shader_stage stage)
1599 {
1600 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1601 struct iris_bufmgr *bufmgr = screen->bufmgr;
1602 const struct gen_device_info *devinfo = &screen->devinfo;
1603
1604 unsigned encoded_size = ffs(per_thread_scratch) - 11;
1605 assert(encoded_size < (1 << 16));
1606
1607 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
1608
1609 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
1610 *
1611 * "Scratch Space per slice is computed based on 4 sub-slices. SW
1612 * must allocate scratch space enough so that each slice has 4
1613 * slices allowed."
1614 *
1615 * According to the other driver team, this applies to compute shaders
1616 * as well. This is not currently documented at all.
1617 *
1618 * This hack is no longer necessary on Gen11+.
1619 */
1620 unsigned subslice_total = screen->subslice_total;
1621 if (devinfo->gen < 11)
1622 subslice_total = 4 * devinfo->num_slices;
1623 assert(subslice_total >= screen->subslice_total);
1624
1625 if (!*bop) {
1626 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
1627 uint32_t max_threads[] = {
1628 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
1629 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
1630 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
1631 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
1632 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
1633 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
1634 };
1635
1636 uint32_t size = per_thread_scratch * max_threads[stage];
1637
1638 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
1639 }
1640
1641 return *bop;
1642 }
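/* encoded_size is log2(per_thread_scratch / 1024): per-thread scratch
 * must be a power of two of at least 1KB, so ffs(1024) - 11 == 0,
 * ffs(2048) - 11 == 1, and so on, matching the encoding the scratch
 * space state fields expect.
 */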
1643
1644 void
1645 iris_init_program_functions(struct pipe_context *ctx)
1646 {
1647 ctx->create_vs_state = iris_create_vs_state;
1648 ctx->create_tcs_state = iris_create_tcs_state;
1649 ctx->create_tes_state = iris_create_tes_state;
1650 ctx->create_gs_state = iris_create_gs_state;
1651 ctx->create_fs_state = iris_create_fs_state;
1652 ctx->create_compute_state = iris_create_compute_state;
1653
1654 ctx->delete_vs_state = iris_delete_shader_state;
1655 ctx->delete_tcs_state = iris_delete_shader_state;
1656 ctx->delete_tes_state = iris_delete_shader_state;
1657 ctx->delete_gs_state = iris_delete_shader_state;
1658 ctx->delete_fs_state = iris_delete_shader_state;
1659 ctx->delete_compute_state = iris_delete_shader_state;
1660
1661 ctx->bind_vs_state = iris_bind_vs_state;
1662 ctx->bind_tcs_state = iris_bind_tcs_state;
1663 ctx->bind_tes_state = iris_bind_tes_state;
1664 ctx->bind_gs_state = iris_bind_gs_state;
1665 ctx->bind_fs_state = iris_bind_fs_state;
1666 ctx->bind_compute_state = iris_bind_cs_state;
1667 }