st/nir: Optionally unify inputs_read/outputs_written when linking.
[mesa.git] / src / mesa / state_tracker / st_glsl_to_nir.cpp
1 /*
2 * Copyright © 2015 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "st_nir.h"
25
26 #include "pipe/p_defines.h"
27 #include "pipe/p_screen.h"
28 #include "pipe/p_context.h"
29
30 #include "program/program.h"
31 #include "program/prog_statevars.h"
32 #include "program/prog_parameter.h"
33 #include "program/ir_to_mesa.h"
34 #include "main/context.h"
35 #include "main/mtypes.h"
36 #include "main/errors.h"
37 #include "main/glspirv.h"
38 #include "main/shaderapi.h"
39 #include "main/uniforms.h"
40
41 #include "main/shaderobj.h"
42 #include "st_context.h"
43 #include "st_glsl_types.h"
44 #include "st_program.h"
45 #include "st_shader_cache.h"
46
47 #include "compiler/nir/nir.h"
48 #include "compiler/glsl_types.h"
49 #include "compiler/glsl/glsl_to_nir.h"
50 #include "compiler/glsl/gl_nir.h"
51 #include "compiler/glsl/gl_nir_linker.h"
52 #include "compiler/glsl/ir.h"
53 #include "compiler/glsl/ir_optimization.h"
54 #include "compiler/glsl/string_to_uint_map.h"
55
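/* Measure a GLSL type in vec4 attribute slots; used below when assigning
 * sampler and image uniform indices.
 */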
56 static int
57 type_size(const struct glsl_type *type)
58 {
59 return type->count_attribute_slots(false);
60 }
61
62 /* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
63 * may need to fix up varying slots so the glsl->nir path is aligned
64 * with the anything->tgsi->nir path.
65 */
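/* For example, gl_TexCoord[2] at VARYING_SLOT_TEX2 gets remapped to
 * VARYING_SLOT_VAR2 below; user varyings starting at VARYING_SLOT_VAR0 are
 * shifted up by 9 slots, presumably to keep them clear of the remapped
 * TEX0-TEX7 (and point coord) range.
 */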
66 static void
67 st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
68 {
69 if (st->needs_texcoord_semantic)
70 return;
71
72 nir_foreach_variable(var, var_list) {
73 if (var->data.location >= VARYING_SLOT_VAR0) {
74 var->data.location += 9;
75 } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
76 (var->data.location <= VARYING_SLOT_TEX7)) {
77 var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
78 }
79 }
80 }
81
82 /* Input location assignment for VS inputs must be handled specially, so
83 * that it is aligned with st's vbo state.
84 * (This isn't the case with, for example, FS inputs, which only need to
85 * agree on varying slots with the VS outputs.)
86 */
87 void
88 st_nir_assign_vs_in_locations(struct nir_shader *nir)
89 {
90 if (nir->info.stage != MESA_SHADER_VERTEX)
91 return;
92
93 bool removed_inputs = false;
94
95 nir->num_inputs = util_bitcount64(nir->info.inputs_read);
96 nir_foreach_variable_safe(var, &nir->inputs) {
97 /* NIR already assigns dual-slot inputs to two locations so all we have
98 * to do is compact everything down.
99 */
100 if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
101 var->data.driver_location =
102 util_bitcount64(nir->info.inputs_read &
103 BITFIELD64_MASK(var->data.location));
104 } else {
105 /* Move unused input variables to the globals list (with no
106 * initialization), to avoid confusing drivers looking through the
107 * inputs array and expecting to find inputs with a driver_location
108 * set.
109 */
110 exec_node_remove(&var->node);
111 var->data.mode = nir_var_shader_temp;
112 exec_list_push_tail(&nir->globals, &var->node);
113 removed_inputs = true;
114 }
115 }
116
117 /* Re-lower global vars, to deal with any dead VS inputs. */
118 if (removed_inputs)
119 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
120 }
121
122 static int
123 st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
124 {
125 struct gl_program_parameter_list *params = prog->Parameters;
126
127 /* Look up the first parameter whose uniform storage index matches the
128 * variable's location.
129 */
130 for (unsigned i = 0; i < params->NumParameters; i++) {
131 int index = params->Parameters[i].MainUniformStorageIndex;
132 if (index == var->data.location)
133 return i;
134 }
135
136 /* TODO: Handle this fallback for SPIR-V. We need this for GLSL e.g. in
137 * dEQP-GLES2.functional.uniform_api.random.3
138 */
139
140 /* is there a better way to do this? If we have something like:
141 *
142 * struct S {
143 * float f;
144 * vec4 v;
145 * };
146 * uniform S color;
147 *
148 * Then what we get in prog->Parameters looks like:
149 *
150 * 0: Name=color.f, Type=6, DataType=1406, Size=1
151 * 1: Name=color.v, Type=6, DataType=8b52, Size=4
152 *
153 * So the name doesn't match up and _mesa_lookup_parameter_index()
154 * fails. In this case just find the first matching "color.*".
155 *
156 * Note for arrays you could end up w/ color[n].f, for example.
157 *
158 * glsl_to_tgsi works slightly differently in this regard. It is
159 * emitting something more low level, so it just translates the
160 * params list 1:1 to CONST[] regs. Going from GLSL IR to TGSI,
161 * it just calculates the additional offset of struct field members
162 * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
163 * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir). It never
164 * needs to work backwards to get base var loc from the param-list
165 * which already has them separated out.
166 */
167 if (!prog->sh.data->spirv) {
168 int namelen = strlen(var->name);
169 for (unsigned i = 0; i < params->NumParameters; i++) {
170 struct gl_program_parameter *p = &params->Parameters[i];
171 if ((strncmp(p->Name, var->name, namelen) == 0) &&
172 ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
173 return i;
174 }
175 }
176 }
177
178 return -1;
179 }
180
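/* Assign driver_location for each uniform variable: samplers and images get
 * indices from their own counters, state-var uniforms get (or reuse) a state
 * reference in prog->Parameters, and everything else is looked up in the
 * existing parameter list.
 */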
181 static void
182 st_nir_assign_uniform_locations(struct gl_context *ctx,
183 struct gl_program *prog,
184 struct exec_list *uniform_list)
185 {
186 int shaderidx = 0;
187 int imageidx = 0;
188
189 nir_foreach_variable(uniform, uniform_list) {
190 int loc;
191
192 /*
193 * UBOs and SSBOs have their own address spaces, so don't count them
194 * towards the number of global uniforms.
195 */
196 if (uniform->data.mode == nir_var_mem_ubo || uniform->data.mode == nir_var_mem_ssbo)
197 continue;
198
199 const struct glsl_type *type = glsl_without_array(uniform->type);
200 if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
201 if (type->is_sampler()) {
202 loc = shaderidx;
203 shaderidx += type_size(uniform->type);
204 } else {
205 loc = imageidx;
206 imageidx += type_size(uniform->type);
207 }
208 } else if (uniform->state_slots) {
209 const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
210 /* This state reference has already been set up by ir_to_mesa, but we'll
211 * get the same index back here.
212 */
213
214 unsigned comps;
215 if (glsl_type_is_struct_or_ifc(type)) {
216 comps = 4;
217 } else {
218 comps = glsl_get_vector_elements(type);
219 }
220
221 if (ctx->Const.PackedDriverUniformStorage) {
222 loc = _mesa_add_sized_state_reference(prog->Parameters,
223 stateTokens, comps, false);
224 loc = prog->Parameters->ParameterValueOffset[loc];
225 } else {
226 loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
227 }
228 } else {
229 loc = st_nir_lookup_parameter_index(prog, uniform);
230
231 /* We need to check that loc is not -1 here before accessing the
232 * array. It can be negative for example when we have a struct that
233 * only contains opaque types.
234 */
235 if (loc >= 0 && ctx->Const.PackedDriverUniformStorage) {
236 loc = prog->Parameters->ParameterValueOffset[loc];
237 }
238 }
239
240 uniform->data.driver_location = loc;
241 }
242 }
243
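/* Generic NIR optimization loop, repeated until no pass reports progress. */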
244 void
245 st_nir_opts(nir_shader *nir)
246 {
247 bool progress;
248
249 do {
250 progress = false;
251
252 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
253
254 /* Linking deals with unused inputs/outputs, but here we can remove
255 * things local to the shader in the hopes that we can cleanup other
256 * things. This pass will also remove variables with only stores, so we
257 * might be able to make progress after it.
258 */
259 NIR_PASS(progress, nir, nir_remove_dead_variables,
260 (nir_variable_mode)(nir_var_function_temp |
261 nir_var_shader_temp |
262 nir_var_mem_shared));
263
264 NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
265 NIR_PASS(progress, nir, nir_opt_dead_write_vars);
266
267 if (nir->options->lower_to_scalar) {
268 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
269 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
270 }
271
272 NIR_PASS_V(nir, nir_lower_alu);
273 NIR_PASS_V(nir, nir_lower_pack);
274 NIR_PASS(progress, nir, nir_copy_prop);
275 NIR_PASS(progress, nir, nir_opt_remove_phis);
276 NIR_PASS(progress, nir, nir_opt_dce);
277 if (nir_opt_trivial_continues(nir)) {
278 progress = true;
279 NIR_PASS(progress, nir, nir_copy_prop);
280 NIR_PASS(progress, nir, nir_opt_dce);
281 }
282 NIR_PASS(progress, nir, nir_opt_if, false);
283 NIR_PASS(progress, nir, nir_opt_dead_cf);
284 NIR_PASS(progress, nir, nir_opt_cse);
285 NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
286
287 NIR_PASS(progress, nir, nir_opt_algebraic);
288 NIR_PASS(progress, nir, nir_opt_constant_folding);
289
290 if (!nir->info.flrp_lowered) {
291 unsigned lower_flrp =
292 (nir->options->lower_flrp16 ? 16 : 0) |
293 (nir->options->lower_flrp32 ? 32 : 0) |
294 (nir->options->lower_flrp64 ? 64 : 0);
295
296 if (lower_flrp) {
297 bool lower_flrp_progress = false;
298
299 NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
300 lower_flrp,
301 false /* always_precise */,
302 nir->options->lower_ffma);
303 if (lower_flrp_progress) {
304 NIR_PASS(progress, nir,
305 nir_opt_constant_folding);
306 progress = true;
307 }
308 }
309
310 /* Nothing should rematerialize any flrps, so we only need to do this
311 * lowering once.
312 */
313 nir->info.flrp_lowered = true;
314 }
315
316 NIR_PASS(progress, nir, nir_opt_undef);
317 NIR_PASS(progress, nir, nir_opt_conditional_discard);
318 if (nir->options->max_unroll_iterations) {
319 NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
320 }
321 } while (progress);
322 }
323
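/* Size/alignment callback used when lowering shared variables to explicit
 * offsets: booleans count as 32-bit components and a vec3 is aligned like a
 * vec4.
 */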
324 static void
325 shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
326 {
327 assert(glsl_type_is_vector_or_scalar(type));
328
329 uint32_t comp_size = glsl_type_is_boolean(type)
330 ? 4 : glsl_get_bit_size(type) / 8;
331 unsigned length = glsl_get_vector_elements(type);
332 *size = comp_size * length;
333 *align = comp_size * (length == 3 ? 4 : length);
334 }
335
336 /* First third of converting glsl_to_nir. This leaves things in a pre-
337 * nir_lower_io state, so that shader variants can more easily insert/
338 * replace variables, etc.
339 */
340 static void
341 st_nir_preprocess(struct st_context *st, struct gl_program *prog,
342 struct gl_shader_program *shader_program,
343 gl_shader_stage stage)
344 {
345 struct pipe_screen *screen = st->pipe->screen;
346 const nir_shader_compiler_options *options =
347 st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
348 assert(options);
349 nir_shader *nir = prog->nir;
350
351 /* Set the next shader stage hint for VS and TES. */
352 if (!nir->info.separate_shader &&
353 (nir->info.stage == MESA_SHADER_VERTEX ||
354 nir->info.stage == MESA_SHADER_TESS_EVAL)) {
355
356 unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
357 unsigned stages_mask =
358 ~prev_stages & shader_program->data->linked_stages;
359
360 nir->info.next_stage = stages_mask ?
361 (gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
362 } else {
363 nir->info.next_stage = MESA_SHADER_FRAGMENT;
364 }
365
366 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
367 if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
368 (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
369 st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
370 }
371
372 /* ES has strict SSO validation rules for shader IO matching so we can't
373 * remove dead IO until the resource list has been built. Here we skip
374 * removing them until later. This will potentially make the IO lowering
375 * calls below do a little extra work but should otherwise have no impact.
376 */
377 if (!_mesa_is_gles(st->ctx) || !nir->info.separate_shader) {
378 nir_variable_mode mask =
379 (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
380 nir_remove_dead_variables(nir, mask);
381 }
382
383 if (options->lower_all_io_to_temps ||
384 nir->info.stage == MESA_SHADER_VERTEX ||
385 nir->info.stage == MESA_SHADER_GEOMETRY) {
386 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
387 nir_shader_get_entrypoint(nir),
388 true, true);
389 } else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
390 !screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS)) {
391 NIR_PASS_V(nir, nir_lower_io_to_temporaries,
392 nir_shader_get_entrypoint(nir),
393 true, false);
394 }
395
396 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
397 NIR_PASS_V(nir, nir_split_var_copies);
398 NIR_PASS_V(nir, nir_lower_var_copies);
399
400 if (options->lower_to_scalar) {
401 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
402 }
403
404 /* before buffers and vars_to_ssa */
405 NIR_PASS_V(nir, gl_nir_lower_bindless_images);
406
407 /* TODO: Change GLSL to not lower shared memory. */
408 if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
409 shader_program->data->spirv) {
410 NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
411 nir_var_mem_shared, shared_type_info);
412 NIR_PASS_V(prog->nir, nir_lower_explicit_io,
413 nir_var_mem_shared, nir_address_format_32bit_offset);
414 }
415
416 /* Do a round of constant folding to clean up address calculations */
417 NIR_PASS_V(nir, nir_opt_constant_folding);
418 }
419
420 /* Second third of converting glsl_to_nir. This creates uniforms, gathers
421 * info on varyings, etc after NIR link time opts have been applied.
422 */
423 static void
424 st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
425 struct gl_shader_program *shader_program)
426 {
427 nir_shader *nir = prog->nir;
428
429 /* Make a pass over the IR to add state references for any built-in
430 * uniforms that are used. This has to be done now (during linking).
431 * Code generation doesn't happen until the first time this shader is
432 * used for rendering. Waiting until then to generate the parameters is
433 * too late. At that point, the values for the built-in uniforms won't
434 * get sent to the shader.
435 */
436 nir_foreach_variable(var, &nir->uniforms) {
437 const nir_state_slot *const slots = var->state_slots;
438 if (slots != NULL) {
439 const struct glsl_type *type = glsl_without_array(var->type);
440 for (unsigned int i = 0; i < var->num_state_slots; i++) {
441 unsigned comps;
442 if (glsl_type_is_struct_or_ifc(type)) {
443 /* Builtin structs require special handling; for now we just
444 * make all members vec4. See st_nir_lower_builtin.
445 */
446 comps = 4;
447 } else {
448 comps = glsl_get_vector_elements(type);
449 }
450
451 if (st->ctx->Const.PackedDriverUniformStorage) {
452 _mesa_add_sized_state_reference(prog->Parameters,
453 slots[i].tokens,
454 comps, false);
455 } else {
456 _mesa_add_state_reference(prog->Parameters,
457 slots[i].tokens);
458 }
459 }
460 }
461 }
462
463 /* Avoid reallocation of the program parameter list, because the uniform
464 * storage is only associated with the original parameter list.
465 * This should be enough for Bitmap and DrawPixels constants.
466 */
467 _mesa_reserve_parameter_storage(prog->Parameters, 8);
468
469 /* This has to be done last. Any operation that can cause
470 * prog->ParameterValues to get reallocated (e.g., anything that adds a
471 * program constant) has to happen before creating this linkage.
472 */
473 _mesa_associate_uniform_storage(st->ctx, shader_program, prog);
474
475 st_set_prog_affected_state_flags(prog);
476
477 /* None of the builtins being lowered here can be produced by SPIR-V. See
478 * _mesa_builtin_uniform_desc.
479 */
480 if (!shader_program->data->spirv)
481 NIR_PASS_V(nir, st_nir_lower_builtin);
482
483 NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
484 NIR_PASS_V(nir, nir_opt_intrinsics);
485
486 /* Lower 64-bit ops. */
487 if (nir->options->lower_int64_options ||
488 nir->options->lower_doubles_options) {
489 bool lowered_64bit_ops = false;
490 if (nir->options->lower_doubles_options) {
491 NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
492 st->ctx->SoftFP64, nir->options->lower_doubles_options);
493 }
494 if (nir->options->lower_int64_options) {
495 NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
496 nir->options->lower_int64_options);
497 }
498
499 if (lowered_64bit_ops)
500 st_nir_opts(nir);
501 }
502
503 nir_variable_mode mask = (nir_variable_mode)
504 (nir_var_shader_in | nir_var_shader_out | nir_var_function_temp);
505 nir_remove_dead_variables(nir, mask);
506
507 NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
508 st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
509
510 st_finalize_nir_before_variants(nir);
511
512 if (st->allow_st_finalize_nir_twice)
513 st_finalize_nir(st, prog, shader_program, nir, true);
514
515 if (st->ctx->_Shader->Flags & GLSL_DUMP) {
516 _mesa_log("\n");
517 _mesa_log("NIR IR for linked %s program %d:\n",
518 _mesa_shader_stage_to_string(prog->info.stage),
519 shader_program->Name);
520 nir_print_shader(nir, _mesa_get_log_file());
521 _mesa_log("\n\n");
522 }
523 }
524
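/* Re-vectorize the IO between two adjacent stages (producer outputs, consumer
 * inputs), then clean up the temporaries and copies this introduces for
 * non-TCS producers.
 */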
525 static void
526 st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
527 {
528 NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
529 NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
530 NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
531
532 if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
533 /* Calling lower_io_to_vector creates output variable writes with
534 * write-masks. We only support these for TCS outputs, so for other
535 * stages, we need to call nir_lower_io_to_temporaries to get rid of
536 * them. This, in turn, creates temporary variables and extra
537 * copy_deref intrinsics that we need to clean up.
538 */
539 NIR_PASS_V(producer, nir_lower_io_to_temporaries,
540 nir_shader_get_entrypoint(producer), true, false);
541 NIR_PASS_V(producer, nir_lower_global_vars_to_local);
542 NIR_PASS_V(producer, nir_split_var_copies);
543 NIR_PASS_V(producer, nir_lower_var_copies);
544 }
545 }
546
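/* Cross-stage optimization of a producer/consumer pair: scalarize IO when the
 * backend asks for it, split IO arrays into elements, optimize both shaders,
 * and remove varyings that the consumer never reads.
 */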
547 static void
548 st_nir_link_shaders(nir_shader *producer, nir_shader *consumer)
549 {
550 if (producer->options->lower_to_scalar) {
551 NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
552 NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
553 }
554
555 nir_lower_io_arrays_to_elements(producer, consumer);
556
557 st_nir_opts(producer);
558 st_nir_opts(consumer);
559
560 if (nir_link_opt_varyings(producer, consumer))
561 st_nir_opts(consumer);
562
563 NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
564 NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
565
566 if (nir_remove_unused_varyings(producer, consumer)) {
567 NIR_PASS_V(producer, nir_lower_global_vars_to_local);
568 NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
569
570 st_nir_opts(producer);
571 st_nir_opts(consumer);
572
573 /* Optimizations can cause varyings to become unused.
574 * nir_compact_varyings() depends on all dead varyings being removed so
575 * we need to call nir_remove_dead_variables() again here.
576 */
577 NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
578 NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
579 }
580 }
581
582 static void
583 st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
584 {
585 struct gl_linked_shader *linked_tcs =
586 shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
587 struct gl_linked_shader *linked_tes =
588 shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
589
590 /* If we have a TCS and TES linked together, lower TES patch vertices. */
591 if (linked_tcs && linked_tes) {
592 nir_shader *tcs_nir = linked_tcs->Program->nir;
593 nir_shader *tes_nir = linked_tes->Program->nir;
594
595 /* The TES input vertex count is the TCS output vertex count, so
596 * lower TES gl_PatchVerticesIn to a constant.
597 */
598 uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
599 NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
600 }
601 }
602
603 extern "C" {
604
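/* For fragment shaders, configure nir_lower_wpos_ytransform from the screen's
 * coordinate-origin and pixel-center caps; if the pass changed anything, add
 * the state reference it needs.
 */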
605 void
606 st_nir_lower_wpos_ytransform(struct nir_shader *nir,
607 struct gl_program *prog,
608 struct pipe_screen *pscreen)
609 {
610 if (nir->info.stage != MESA_SHADER_FRAGMENT)
611 return;
612
613 static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
614 STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
615 };
616 nir_lower_wpos_ytransform_options wpos_options = { { 0 } };
617
618 memcpy(wpos_options.state_tokens, wposTransformState,
619 sizeof(wpos_options.state_tokens));
620 wpos_options.fs_coord_origin_upper_left =
621 pscreen->get_param(pscreen,
622 PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
623 wpos_options.fs_coord_origin_lower_left =
624 pscreen->get_param(pscreen,
625 PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
626 wpos_options.fs_coord_pixel_center_integer =
627 pscreen->get_param(pscreen,
628 PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
629 wpos_options.fs_coord_pixel_center_half_integer =
630 pscreen->get_param(pscreen,
631 PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);
632
633 if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
634 nir_validate_shader(nir, "after nir_lower_wpos_ytransform");
635 _mesa_add_state_reference(prog->Parameters, wposTransformState);
636 }
637 }
638
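/* Link-time entry point for the NIR path: convert every linked stage to NIR
 * (from GLSL IR or SPIR-V), run cross-stage link optimizations from the last
 * stage backwards, then finalize each stage's gl_program.
 */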
639 bool
640 st_link_nir(struct gl_context *ctx,
641 struct gl_shader_program *shader_program)
642 {
643 struct st_context *st = st_context(ctx);
644 struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
645 unsigned num_shaders = 0;
646
647 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
648 if (shader_program->_LinkedShaders[i])
649 linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
650 }
651
652 for (unsigned i = 0; i < num_shaders; i++) {
653 struct gl_linked_shader *shader = linked_shader[i];
654 const nir_shader_compiler_options *options =
655 st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
656 struct gl_program *prog = shader->Program;
657 struct st_program *stp = (struct st_program *)prog;
658
659 _mesa_copy_linked_program_data(shader_program, shader);
660
661 assert(!prog->nir);
662 stp->shader_program = shader_program;
663 stp->state.type = PIPE_SHADER_IR_NIR;
664
665 if (shader_program->data->spirv) {
666 prog->Parameters = _mesa_new_parameter_list();
667 /* Parameters will be filled during NIR linking. */
668
669 prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
670 } else {
671 validate_ir_tree(shader->ir);
672
673 prog->Parameters = _mesa_new_parameter_list();
674 _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
675 prog->Parameters);
676
677 if (ctx->_Shader->Flags & GLSL_DUMP) {
678 _mesa_log("\n");
679 _mesa_log("GLSL IR for linked %s program %d:\n",
680 _mesa_shader_stage_to_string(shader->Stage),
681 shader_program->Name);
682 _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
683 _mesa_log("\n\n");
684 }
685
686 prog->ExternalSamplersUsed = gl_external_samplers(prog);
687 _mesa_update_shader_textures_used(shader_program, prog);
688
689 prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
690 st_nir_preprocess(st, prog, shader_program, shader->Stage);
691 }
692
693 if (options->lower_to_scalar) {
694 NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
695 }
696 }
697
698 st_lower_patch_vertices_in(shader_program);
699
700 /* For SPIR-V, we have to perform the NIR linking before applying
701 * st_nir_preprocess.
702 */
703 if (shader_program->data->spirv) {
704 static const gl_nir_linker_options opts = {
705 true /*fill_parameters */
706 };
707 if (!gl_nir_link(ctx, shader_program, &opts))
708 return GL_FALSE;
709
710 nir_build_program_resource_list(ctx, shader_program, true);
711
712 for (unsigned i = 0; i < num_shaders; i++) {
713 struct gl_linked_shader *shader = linked_shader[i];
714 struct gl_program *prog = shader->Program;
715
716 prog->ExternalSamplersUsed = gl_external_samplers(prog);
717 _mesa_update_shader_textures_used(shader_program, prog);
718 st_nir_preprocess(st, prog, shader_program, shader->Stage);
719 }
720 }
721
722 /* Linking the stages in the opposite order (from fragment to vertex)
723 * ensures that inter-shader outputs written to in an earlier stage
724 * are eliminated if they are (transitively) not used in a later
725 * stage.
726 */
727 for (int i = num_shaders - 2; i >= 0; i--) {
728 st_nir_link_shaders(linked_shader[i]->Program->nir,
729 linked_shader[i + 1]->Program->nir);
730 }
731
732 if (!shader_program->data->spirv)
733 nir_build_program_resource_list(ctx, shader_program, false);
734
735 for (unsigned i = 0; i < num_shaders; i++) {
736 struct gl_linked_shader *shader = linked_shader[i];
737 nir_shader *nir = shader->Program->nir;
738
739 NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
740
741 /* Linked shaders are optimized in st_nir_link_shaders. Separate shaders
742 * and shaders with a fixed-func VS or FS are optimized here.
743 */
744 if (num_shaders == 1)
745 st_nir_opts(nir);
746
747 /* Remap the locations to slots so those requiring two slots will occupy
748 * two locations. For instance, if the IR has a dvec3 attr0 in
749 * location 0 and a vec4 attr1 in location 1, in NIR attr0 will use
750 * locations/slots 0 and 1, and attr1 will use location/slot 2.
751 */
752 if (nir->info.stage == MESA_SHADER_VERTEX && !shader_program->data->spirv)
753 nir_remap_dual_slot_attributes(nir, &shader->Program->DualSlotInputs);
754
755 NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
756 st->pipe->screen);
757
758 NIR_PASS_V(nir, nir_lower_system_values);
759 NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
760
761 nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
762 shader->Program->info = nir->info;
763 if (shader->Stage == MESA_SHADER_VERTEX) {
764 /* NIR expands dual-slot inputs out to two locations. We need to
765 * compact things back down GL-style single-slot inputs to avoid
766 * confusing the state tracker.
767 */
768 shader->Program->info.inputs_read =
769 nir_get_single_slot_attribs_mask(nir->info.inputs_read,
770 shader->Program->DualSlotInputs);
771 }
772
773 if (i >= 1) {
774 struct gl_program *prev_shader = linked_shader[i - 1]->Program;
775
776 /* We can't use nir_compact_varyings with transform feedback, since
777 * the pipe_stream_output->output_register field is based on the
778 * pre-compacted driver_locations.
779 */
780 if (!(prev_shader->sh.LinkedTransformFeedback &&
781 prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
782 nir_compact_varyings(prev_shader->nir,
783 nir, ctx->API != API_OPENGL_COMPAT);
784
785 if (ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->vectorize_io)
786 st_nir_vectorize_io(prev_shader->nir, nir);
787 }
788 }
789
790 for (unsigned i = 0; i < num_shaders; i++) {
791 struct gl_linked_shader *shader = linked_shader[i];
792 struct gl_program *prog = shader->Program;
793 struct st_program *stp = st_program(prog);
794 st_glsl_to_nir_post_opts(st, prog, shader_program);
795
796 /* Initialize st_vertex_program members. */
797 if (shader->Stage == MESA_SHADER_VERTEX)
798 st_prepare_vertex_program(stp);
799
800 /* Get pipe_stream_output_info. */
801 if (shader->Stage == MESA_SHADER_VERTEX ||
802 shader->Stage == MESA_SHADER_TESS_EVAL ||
803 shader->Stage == MESA_SHADER_GEOMETRY)
804 st_translate_stream_output_info(prog);
805
806 st_store_ir_in_disk_cache(st, prog, true);
807
808 st_release_variants(st, stp);
809 st_finalize_program(st, prog);
810
811 /* The GLSL IR won't be needed anymore. */
812 ralloc_free(shader->ir);
813 shader->ir = NULL;
814 }
815
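/* Optionally unify the IO masks of adjacent stages (the subject of this
 * commit): when the backend sets unify_interfaces, each producer's
 * outputs_written is extended with the consumer's inputs_read and vice
 * versa, and likewise for the per-patch masks, while the tess-level bits
 * are left untouched.
 */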
816 struct shader_info *prev_info = NULL;
817
818 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
819 struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
820 if (!shader)
821 continue;
822
823 struct shader_info *info = &shader->Program->nir->info;
824
825 if (prev_info &&
826 ctx->Const.ShaderCompilerOptions[i].NirOptions->unify_interfaces) {
827 prev_info->outputs_written |= info->inputs_read &
828 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
829 info->inputs_read |= prev_info->outputs_written &
830 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
831
832 prev_info->patch_outputs_written |= info->patch_inputs_read;
833 info->patch_inputs_read |= prev_info->patch_outputs_written;
834 }
835 prev_info = info;
836 }
837
838 return true;
839 }
840
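/* Assign packed driver_locations to this stage's shader-in/out varyings and
 * apply the TEX->VAR slot fixup where TEXCOORD semantics are unavailable.
 */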
841 void
842 st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
843 {
844 if (nir->info.stage == MESA_SHADER_VERTEX) {
845 nir_assign_io_var_locations(&nir->outputs,
846 &nir->num_outputs,
847 nir->info.stage);
848 st_nir_fixup_varying_slots(st, &nir->outputs);
849 } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
850 nir->info.stage == MESA_SHADER_TESS_CTRL ||
851 nir->info.stage == MESA_SHADER_TESS_EVAL) {
852 nir_assign_io_var_locations(&nir->inputs,
853 &nir->num_inputs,
854 nir->info.stage);
855 st_nir_fixup_varying_slots(st, &nir->inputs);
856
857 nir_assign_io_var_locations(&nir->outputs,
858 &nir->num_outputs,
859 nir->info.stage);
860 st_nir_fixup_varying_slots(st, &nir->outputs);
861 } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
862 nir_assign_io_var_locations(&nir->inputs,
863 &nir->num_inputs,
864 nir->info.stage);
865 st_nir_fixup_varying_slots(st, &nir->inputs);
866 nir_assign_io_var_locations(&nir->outputs,
867 &nir->num_outputs,
868 nir->info.stage);
869 } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
870 /* TODO? */
871 } else {
872 unreachable("invalid shader type");
873 }
874 }
875
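/* Lower GL sampler/texture references either to derefs or to indexed form,
 * depending on PIPE_CAP_NIR_SAMPLERS_AS_DEREF, and copy the resulting
 * texture-usage masks into the gl_program when one is provided.
 */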
876 void
877 st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
878 struct gl_shader_program *shader_program,
879 struct gl_program *prog)
880 {
881 if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
882 NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
883 else
884 NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
885
886 if (prog) {
887 prog->info.textures_used = nir->info.textures_used;
888 prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
889 }
890 }
891
892 /* Last third of preparing nir from glsl, which happens after shader
893 * variant lowering.
894 */
895 void
896 st_finalize_nir(struct st_context *st, struct gl_program *prog,
897 struct gl_shader_program *shader_program,
898 nir_shader *nir, bool finalize_by_driver)
899 {
900 struct pipe_screen *screen = st->pipe->screen;
901
902 NIR_PASS_V(nir, nir_split_var_copies);
903 NIR_PASS_V(nir, nir_lower_var_copies);
904
905 st_nir_assign_varying_locations(st, nir);
906 st_nir_assign_uniform_locations(st->ctx, prog,
907 &nir->uniforms);
908
909 /* Set num_uniforms in number of attribute slots (vec4s) */
910 nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
911
912 if (st->ctx->Const.PackedDriverUniformStorage) {
913 NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_type_dword_size,
914 (nir_lower_io_options)0);
915 NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
916 } else {
917 NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_uniforms_type_size,
918 (nir_lower_io_options)0);
919 }
920
921 st_nir_lower_samplers(screen, nir, shader_program, prog);
922
923 if (finalize_by_driver && screen->finalize_nir)
924 screen->finalize_nir(screen, nir, false);
925 }
926
927 } /* extern "C" */