mesa/gallium: automatically lower point-size
mesa.git: src/mesa/state_tracker/st_glsl_to_nir.cpp
/*
 * Copyright © 2015 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "st_nir.h"

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_context.h"

#include "program/program.h"
#include "program/prog_statevars.h"
#include "program/prog_parameter.h"
#include "program/ir_to_mesa.h"
#include "main/mtypes.h"
#include "main/errors.h"
#include "main/glspirv.h"
#include "main/shaderapi.h"
#include "main/uniforms.h"

#include "main/shaderobj.h"
#include "st_context.h"
#include "st_glsl_types.h"
#include "st_program.h"
#include "st_shader_cache.h"

#include "compiler/nir/nir.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/gl_nir_linker.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/string_to_uint_map.h"

static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}

/* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
 * may need to fix up varying slots so the glsl->nir path is aligned
 * with the anything->tgsi->nir path.
 */
static void
st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
{
   if (st->needs_texcoord_semantic)
      return;

   nir_foreach_variable(var, var_list) {
      if (var->data.location >= VARYING_SLOT_VAR0) {
         var->data.location += 9;
      } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
                 (var->data.location <= VARYING_SLOT_TEX7)) {
         var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
      }
   }
}

/* input location assignment for VS inputs must be handled specially, so
 * that it is aligned w/ st's vbo state.
 * (This isn't the case with, for ex, FS inputs, which only need to agree
 * on varying-slot w/ the VS outputs)
 */
static void
st_nir_assign_vs_in_locations(nir_shader *nir)
{
   nir->num_inputs = util_bitcount64(nir->info.inputs_read);
   nir_foreach_variable_safe(var, &nir->inputs) {
      /* NIR already assigns dual-slot inputs to two locations so all we have
       * to do is compact everything down.
       */
      if (var->data.location == VERT_ATTRIB_EDGEFLAG) {
         /* bit of a hack, mirroring st_translate_vertex_program */
         var->data.driver_location = nir->num_inputs++;
      } else if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
         var->data.driver_location =
            util_bitcount64(nir->info.inputs_read &
                            BITFIELD64_MASK(var->data.location));
      } else {
         /* Move unused input variables to the globals list (with no
          * initialization), to avoid confusing drivers looking through the
          * inputs array and expecting to find inputs with a driver_location
          * set.
          */
         exec_node_remove(&var->node);
         var->data.mode = nir_var_shader_temp;
         exec_list_push_tail(&nir->globals, &var->node);
      }
   }
}

static int
st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
{
   struct gl_program_parameter_list *params = prog->Parameters;

   /* Look up the first parameter whose main uniform storage index matches
    * the variable's location.
    */
   for (unsigned i = 0; i < params->NumParameters; i++) {
      int index = params->Parameters[i].MainUniformStorageIndex;
      if (index == var->data.location)
         return i;
   }

   /* TODO: Handle this fallback for SPIR-V. We need this for GLSL e.g. in
    * dEQP-GLES2.functional.uniform_api.random.3
    */

   /* is there a better way to do this?  If we have something like:
    *
    *    struct S {
    *       float f;
    *       vec4 v;
    *    };
    *    uniform S color;
    *
    * Then what we get in prog->Parameters looks like:
    *
    *    0: Name=color.f, Type=6, DataType=1406, Size=1
    *    1: Name=color.v, Type=6, DataType=8b52, Size=4
    *
    * So the name doesn't match up and _mesa_lookup_parameter_index()
    * fails.  In this case just find the first matching "color.*".
    *
    * Note for arrays you could end up w/ color[n].f, for example.
    *
    * glsl_to_tgsi works slightly differently in this regard.  It is
    * emitting something more low level, so it just translates the
    * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
    * it just calculates the additional offset of struct field members
    * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
    * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
    * needs to work backwards to get base var loc from the param-list
    * which already has them separated out.
    */
   if (!prog->sh.data->spirv) {
      int namelen = strlen(var->name);
      for (unsigned i = 0; i < params->NumParameters; i++) {
         struct gl_program_parameter *p = &params->Parameters[i];
         if ((strncmp(p->Name, var->name, namelen) == 0) &&
             ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
            return i;
         }
      }
   }

   return -1;
}

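/* Assign driver_location for every uniform variable: samplers and images
 * each get their own contiguous index space, state-var uniforms get (or
 * reuse) a slot in the parameter list, and everything else is looked up in
 * prog->Parameters via st_nir_lookup_parameter_index().
 */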
static void
st_nir_assign_uniform_locations(struct gl_context *ctx,
                                struct gl_program *prog,
                                struct exec_list *uniform_list)
{
   int shaderidx = 0;
   int imageidx = 0;

   nir_foreach_variable(uniform, uniform_list) {
      int loc;

      /*
       * UBOs and SSBOs have their own address spaces, so don't count them
       * towards the number of global uniforms.
       */
      if (uniform->data.mode == nir_var_mem_ubo || uniform->data.mode == nir_var_mem_ssbo)
         continue;

      const struct glsl_type *type = glsl_without_array(uniform->type);
      if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
         if (type->is_sampler()) {
            loc = shaderidx;
            shaderidx += type_size(uniform->type);
         } else {
            loc = imageidx;
            imageidx += type_size(uniform->type);
         }
      } else if (uniform->state_slots) {
         const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
         /* This state reference has already been set up by ir_to_mesa, but
          * we'll get the same index back here.
          */

         unsigned comps;
         if (glsl_type_is_struct_or_ifc(type)) {
            comps = 4;
         } else {
            comps = glsl_get_vector_elements(type);
         }

         if (ctx->Const.PackedDriverUniformStorage) {
            loc = _mesa_add_sized_state_reference(prog->Parameters,
                                                  stateTokens, comps, false);
            loc = prog->Parameters->ParameterValueOffset[loc];
         } else {
            loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
         }
      } else {
         loc = st_nir_lookup_parameter_index(prog, uniform);

         /* We need to check that loc is not -1 here before accessing the
          * array. It can be negative for example when we have a struct that
          * only contains opaque types.
          */
         if (loc >= 0 && ctx->Const.PackedDriverUniformStorage) {
            loc = prog->Parameters->ParameterValueOffset[loc];
         }
      }

      uniform->data.driver_location = loc;
   }
}

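/* Run the shared NIR optimization loop: repeat the pass list until no pass
 * reports further progress.
 */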
void
st_nir_opts(nir_shader *nir)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;

      NIR_PASS_V(nir, nir_lower_vars_to_ssa);

      /* Linking deals with unused inputs/outputs, but here we can remove
       * things local to the shader in the hopes that we can clean up other
       * things. This pass will also remove variables with only stores, so we
       * might be able to make progress after it.
       */
      NIR_PASS(progress, nir, nir_remove_dead_variables,
               (nir_variable_mode)(nir_var_function_temp |
                                   nir_var_shader_temp |
                                   nir_var_mem_shared));

      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
      NIR_PASS(progress, nir, nir_opt_dead_write_vars);

      if (nir->options->lower_to_scalar) {
         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
      }

      NIR_PASS_V(nir, nir_lower_alu);
      NIR_PASS_V(nir, nir_lower_pack);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      if (nir_opt_trivial_continues(nir)) {
         progress = true;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
      }
      NIR_PASS(progress, nir, nir_opt_if, false);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  nir->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, nir,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, nir, nir_opt_access);

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
      }
   } while (progress);
}

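/* Size/alignment callback for nir_lower_vars_to_explicit_types on shared
 * memory: component-based sizes, with booleans stored as 32-bit values and
 * vec3 aligned like vec4.
 */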
static void
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length,
   *align = comp_size * (length == 3 ? 4 : length);
}

/* First third of converting glsl_to_nir. This leaves things in a pre-
 * nir_lower_io state, so that shader variants can more easily insert/
 * replace variables, etc.
 */
static void
st_nir_preprocess(struct st_context *st, struct gl_program *prog,
                  struct gl_shader_program *shader_program,
                  gl_shader_stage stage)
{
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
   assert(options);
   bool lower_64bit =
      options->lower_int64_options || options->lower_doubles_options;
   nir_shader *nir = prog->nir;

   /* Set the next shader stage hint for VS and TES. */
   if (!nir->info.separate_shader &&
       (nir->info.stage == MESA_SHADER_VERTEX ||
        nir->info.stage == MESA_SHADER_TESS_EVAL)) {

      unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
      unsigned stages_mask =
         ~prev_stages & shader_program->data->linked_stages;

      nir->info.next_stage = stages_mask ?
         (gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
   } else {
      nir->info.next_stage = MESA_SHADER_FRAGMENT;
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
       (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
      st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
   }

   nir_variable_mode mask =
      (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
   nir_remove_dead_variables(nir, mask);

   if (options->lower_all_io_to_temps ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, true);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, false);
   }

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   if (options->lower_to_scalar) {
      NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   }

   /* before buffers and vars_to_ssa */
   NIR_PASS_V(nir, gl_nir_lower_bindless_images);
   st_nir_opts(nir);

   /* TODO: Change GLSL to not lower shared memory. */
   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
       shader_program->data->spirv) {
      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_type_info);
      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
   /* Do a round of constant folding to clean up address calculations */
   NIR_PASS_V(nir, nir_opt_constant_folding);

   if (lower_64bit) {
      bool lowered_64bit_ops = false;
      if (options->lower_doubles_options) {
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
                  st->ctx->SoftFP64, options->lower_doubles_options);
      }
      if (options->lower_int64_options) {
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
                  options->lower_int64_options);
      }

      if (lowered_64bit_ops)
         st_nir_opts(nir);
   }
}

/* Second third of converting glsl_to_nir. This creates uniforms and gathers
 * info on varyings, etc. after NIR link-time opts have been applied.
 */
static void
st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                         struct gl_shader_program *shader_program)
{
   nir_shader *nir = prog->nir;

   /* Make a pass over the IR to add state references for any built-in
    * uniforms that are used. This has to be done now (during linking).
    * Code generation doesn't happen until the first time this shader is
    * used for rendering. Waiting until then to generate the parameters is
    * too late. At that point, the values for the built-in uniforms won't
    * get sent to the shader.
    */
   nir_foreach_variable(var, &nir->uniforms) {
      const nir_state_slot *const slots = var->state_slots;
      if (slots != NULL) {
         const struct glsl_type *type = glsl_without_array(var->type);
         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            unsigned comps;
            if (glsl_type_is_struct_or_ifc(type)) {
               /* Builtin structs require special handling; for now we just
                * make all members vec4. See st_nir_lower_builtin.
                */
               comps = 4;
            } else {
               comps = glsl_get_vector_elements(type);
            }

            if (st->ctx->Const.PackedDriverUniformStorage) {
               _mesa_add_sized_state_reference(prog->Parameters,
                                               slots[i].tokens,
                                               comps, false);
            } else {
               _mesa_add_state_reference(prog->Parameters,
                                         slots[i].tokens);
            }
         }
      }
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last. Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(st->ctx, shader_program, prog);

   st_set_prog_affected_state_flags(prog);

   /* None of the builtins being lowered here can be produced by SPIR-V. See
    * _mesa_builtin_uniform_desc.
    */
   if (!shader_program->data->spirv)
      NIR_PASS_V(nir, st_nir_lower_builtin);

   NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
   NIR_PASS_V(nir, nir_opt_intrinsics);

   nir_variable_mode mask = nir_var_function_temp;
   nir_remove_dead_variables(nir, mask);

   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("NIR IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(prog->info.stage),
                shader_program->Name);
      nir_print_shader(nir, _mesa_get_log_file());
      _mesa_log("\n\n");
   }
}

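/* Point the stage-specific st program wrapper at the linked shader_program
 * and hand it the NIR as its IR (PIPE_SHADER_IR_NIR).
 */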
static void
set_st_program(struct gl_program *prog,
               struct gl_shader_program *shader_program,
               nir_shader *nir)
{
   struct st_vertex_program *stvp;
   struct st_common_program *stp;
   struct st_fragment_program *stfp;

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      stvp = (struct st_vertex_program *)prog;
      stvp->shader_program = shader_program;
      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
      stvp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_GEOMETRY:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_COMPUTE:
      stp = (struct st_common_program *)prog;
      stp->shader_program = shader_program;
      stp->tgsi.type = PIPE_SHADER_IR_NIR;
      stp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_FRAGMENT:
      stfp = (struct st_fragment_program *)prog;
      stfp->shader_program = shader_program;
      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
      stfp->tgsi.ir.nir = nir;
      break;
   default:
      unreachable("unknown shader stage");
   }
}

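/* Re-vectorize the I/O between a producer/consumer pair after the
 * scalarizing link-time passes, then use nir_lower_io_to_temporaries to get
 * rid of the write-masked output stores this can create for stages other
 * than TCS.
 */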
static void
st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
{
   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if ((producer)->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks. We only support these for TCS outputs, so for other
       * stages, we need to call nir_lower_io_to_temporaries to get rid of
       * them. This, in turn, creates temporary variables and extra
       * copy_deref intrinsics that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }
}

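/* Cross-stage varying optimization for one producer/consumer pair:
 * scalarize I/O if the driver wants scalars, then repeatedly remove and
 * re-optimize dead varyings on both sides.
 */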
static void
st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
{
   if ((*producer)->options->lower_to_scalar) {
      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
   }

   nir_lower_io_arrays_to_elements(*producer, *consumer);

   st_nir_opts(*producer);
   st_nir_opts(*consumer);

   if (nir_link_opt_varyings(*producer, *consumer))
      st_nir_opts(*consumer);

   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      st_nir_opts(*producer);
      st_nir_opts(*consumer);

      /* Optimizations can cause varyings to become unused.
       * nir_compact_varyings() depends on all dead varyings being removed so
       * we need to call nir_remove_dead_variables() again here.
       */
      NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
      NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
   }
}

static void
st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
{
   struct gl_linked_shader *linked_tcs =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
   struct gl_linked_shader *linked_tes =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];

   /* If we have a TCS and TES linked together, lower TES patch vertices. */
   if (linked_tcs && linked_tes) {
      nir_shader *tcs_nir = linked_tcs->Program->nir;
      nir_shader *tes_nir = linked_tes->Program->nir;

      /* The TES input vertex count is the TCS output vertex count, so lower
       * TES gl_PatchVerticesIn to a constant.
       */
      uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
      NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
   }
}

extern "C" {

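/* Lower the fragment-coordinate y-transform according to the screen's
 * coordinate-origin and pixel-center caps, and add the required state
 * reference when the pass actually changed the shader.
 */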
void
st_nir_lower_wpos_ytransform(struct nir_shader *nir,
                             struct gl_program *prog,
                             struct pipe_screen *pscreen)
{
   if (nir->info.stage != MESA_SHADER_FRAGMENT)
      return;

   static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
      STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
   };
   nir_lower_wpos_ytransform_options wpos_options = { { 0 } };

   memcpy(wpos_options.state_tokens, wposTransformState,
          sizeof(wpos_options.state_tokens));
   wpos_options.fs_coord_origin_upper_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
   wpos_options.fs_coord_origin_lower_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
   wpos_options.fs_coord_pixel_center_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
   wpos_options.fs_coord_pixel_center_half_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);

   if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
      nir_validate_shader(nir, "after nir_lower_wpos_ytransform");
      _mesa_add_state_reference(prog->Parameters, wposTransformState);
   }
}

bool
st_link_nir(struct gl_context *ctx,
            struct gl_shader_program *shader_program)
{
   struct st_context *st = st_context(ctx);
   struct pipe_screen *screen = st->pipe->screen;

   unsigned last_stage = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      const nir_shader_compiler_options *options =
         st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
      struct gl_program *prog = shader->Program;
      _mesa_copy_linked_program_data(shader_program, shader);

      assert(!prog->nir);

      if (shader_program->data->spirv) {
         prog->Parameters = _mesa_new_parameter_list();
         /* Parameters will be filled during NIR linking. */

         prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
         set_st_program(prog, shader_program, prog->nir);
      } else {
         validate_ir_tree(shader->ir);

         prog->Parameters = _mesa_new_parameter_list();
         _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                                     prog->Parameters);

         /* Remove reads from output registers. */
         if (!screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
            lower_output_reads(shader->Stage, shader->ir);

         if (ctx->_Shader->Flags & GLSL_DUMP) {
            _mesa_log("\n");
            _mesa_log("GLSL IR for linked %s program %d:\n",
                      _mesa_shader_stage_to_string(shader->Stage),
                      shader_program->Name);
            _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
            _mesa_log("\n\n");
         }

         prog->ExternalSamplersUsed = gl_external_samplers(prog);
         _mesa_update_shader_textures_used(shader_program, prog);

         prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
         set_st_program(prog, shader_program, prog->nir);
         st_nir_preprocess(st, prog, shader_program, shader->Stage);
      }

      last_stage = i;

      if (options->lower_to_scalar) {
         NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
      }
   }

   /* For SPIR-V, we have to perform the NIR linking before applying
    * st_nir_preprocess.
    */
   if (shader_program->data->spirv) {
      static const gl_nir_linker_options opts = {
         true /*fill_parameters */
      };
      if (!gl_nir_link(ctx, shader_program, &opts))
         return GL_FALSE;

      nir_build_program_resource_list(ctx, shader_program);

      for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
         struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
         if (shader == NULL)
            continue;

         struct gl_program *prog = shader->Program;
         prog->ExternalSamplersUsed = gl_external_samplers(prog);
         _mesa_update_shader_textures_used(shader_program, prog);

         st_nir_preprocess(st, prog, shader_program, shader->Stage);
      }
   }

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage
    * are eliminated if they are (transitively) not used in a later
    * stage.
    */
   int next = last_stage;
   for (int i = next - 1; i >= 0; i--) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_nir_link_shaders(&shader->Program->nir,
                          &shader_program->_LinkedShaders[next]->Program->nir);
      next = i;
   }

   int prev = -1;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      nir_shader *nir = shader->Program->nir;

      NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
                 st->pipe->screen);

      NIR_PASS_V(nir, nir_lower_system_values);
      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

      nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
      shader->Program->info = nir->info;
      if (i == MESA_SHADER_VERTEX) {
         /* NIR expands dual-slot inputs out to two locations. We need to
          * compact things back down to GL-style single-slot inputs to avoid
          * confusing the state tracker.
          */
         shader->Program->info.inputs_read =
            nir_get_single_slot_attribs_mask(nir->info.inputs_read,
                                             shader->Program->DualSlotInputs);
      }

      if (prev != -1) {
         struct gl_program *prev_shader =
            shader_program->_LinkedShaders[prev]->Program;

         /* We can't use nir_compact_varyings with transform feedback, since
          * the pipe_stream_output->output_register field is based on the
          * pre-compacted driver_locations.
          */
         if (!(prev_shader->sh.LinkedTransformFeedback &&
               prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
            nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
                                 nir, ctx->API != API_OPENGL_COMPAT);

         if (ctx->Const.ShaderCompilerOptions[i].NirOptions->vectorize_io)
            st_nir_vectorize_io(prev_shader->nir, nir);
      }
      prev = i;
   }

   st_lower_patch_vertices_in(shader_program);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      struct gl_program *prog = shader->Program;
      st_glsl_to_nir_post_opts(st, prog, shader_program);

      /* Initialize st_vertex_program members. */
      if (i == MESA_SHADER_VERTEX)
         st_prepare_vertex_program(st_vertex_program(prog));

      /* Get pipe_stream_output_info. */
      if (i == MESA_SHADER_VERTEX ||
          i == MESA_SHADER_TESS_EVAL ||
          i == MESA_SHADER_GEOMETRY)
         st_translate_stream_output_info(prog);

      st_store_ir_in_disk_cache(st, prog, true);

      if (!ctx->Driver.ProgramStringNotify(ctx,
                                           _mesa_shader_stage_to_program(i),
                                           prog)) {
         _mesa_reference_program(ctx, &shader->Program, NULL);
         return false;
      }

      nir_sweep(prog->nir);

      /* The GLSL IR won't be needed anymore. */
      ralloc_free(shader->ir);
      shader->ir = NULL;
   }

   return true;
}

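/* Assign driver_locations for the shader's inputs and outputs. VS inputs
 * follow the vbo attribute layout; everything else goes through
 * nir_assign_io_var_locations plus the TEXCOORD-semantic slot fixup.
 */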
void
st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
{
   if (nir->info.stage == MESA_SHADER_VERTEX) {
      /* Needs special handling so drvloc matches the vbo state: */
      st_nir_assign_vs_in_locations(nir);
      /* Re-lower global vars, to deal with any dead VS inputs. */
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);

      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);

      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      /* TODO? */
   } else {
      unreachable("invalid shader type");
   }
}

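/* Lower sampler derefs either to the deref-based form or to flat indices,
 * depending on PIPE_CAP_NIR_SAMPLERS_AS_DEREF, and copy the resulting
 * texture-usage masks back into the gl_program.
 */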
void
st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
                      struct gl_shader_program *shader_program,
                      struct gl_program *prog)
{
   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
   else
      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);

   if (prog) {
      prog->info.textures_used = nir->info.textures_used;
      prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
   }
}

/* Last third of preparing nir from glsl, which happens after shader
 * variant lowering.
 */
void
st_finalize_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program, nir_shader *nir)
{
   struct pipe_screen *screen = st->pipe->screen;
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   if (options->lower_all_io_to_temps ||
       options->lower_all_io_to_elements ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
   }

   st_nir_assign_varying_locations(st, nir);

   NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
              st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);

   st_nir_assign_uniform_locations(st->ctx, prog,
                                   &nir->uniforms);

   /* Set num_uniforms in units of attribute slots (vec4s) */
   nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);

   if (st->ctx->Const.PackedDriverUniformStorage) {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_type_dword_size,
                 (nir_lower_io_options)0);
      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
   } else {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_uniforms_type_size,
                 (nir_lower_io_options)0);
   }

   st_nir_lower_samplers(screen, nir, shader_program, prog);
}

} /* extern "C" */