/*
 * Copyright © 2015 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "st_nir.h"

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_context.h"

#include "program/program.h"
#include "program/prog_statevars.h"
#include "program/prog_parameter.h"
#include "program/ir_to_mesa.h"
#include "main/mtypes.h"
#include "main/errors.h"
#include "main/shaderapi.h"
#include "main/uniforms.h"

#include "st_context.h"
#include "st_glsl_types.h"
#include "st_program.h"

#include "compiler/nir/nir.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/string_to_uint_map.h"
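
/* Number of attribute slots occupied by a GLSL type; used below when
 * assigning driver locations to inputs, outputs and uniforms.
 */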
static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}

/* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
 * may need to fix up varying slots so the glsl->nir path is aligned
 * with the anything->tgsi->nir path.
 */
static void
st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
{
   if (st->needs_texcoord_semantic)
      return;

   nir_foreach_variable(var, var_list) {
      if (var->data.location >= VARYING_SLOT_VAR0) {
         var->data.location += 9;
      } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
                 (var->data.location <= VARYING_SLOT_TEX7)) {
         var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
      }
   }
}

/* Input location assignment for VS inputs must be handled specially, so
 * that it is aligned w/ st's vbo state.
 * (This isn't the case with, for ex, FS inputs, which only need to agree
 * on varying-slot w/ the VS outputs.)
 */
static void
st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
{
   unsigned attr, num_inputs = 0;
   unsigned input_to_index[VERT_ATTRIB_MAX] = {0};

   /* TODO de-duplicate w/ similar code in st_translate_vertex_program()? */
   for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
      if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
         input_to_index[attr] = num_inputs;
         num_inputs++;

         if ((prog->info.vs.double_inputs_read & BITFIELD64_BIT(attr)) != 0) {
            /* add placeholder for second part of a double attribute */
            num_inputs++;
         }
      } else {
         input_to_index[attr] = ~0;
      }
   }

   /* bit of a hack, mirroring st_translate_vertex_program */
   input_to_index[VERT_ATTRIB_EDGEFLAG] = num_inputs;

   nir->num_inputs = 0;
   nir_foreach_variable_safe(var, &nir->inputs) {
      attr = var->data.location;
      assert(attr < ARRAY_SIZE(input_to_index));

      if (input_to_index[attr] != ~0u) {
         var->data.driver_location = input_to_index[attr];
         nir->num_inputs++;
      } else {
         /* Move unused input variables to the globals list (with no
          * initialization), to avoid confusing drivers looking through the
          * inputs array and expecting to find inputs with a driver_location
          * set.
          */
         exec_node_remove(&var->node);
         var->data.mode = nir_var_global;
         exec_list_push_tail(&nir->globals, &var->node);
      }
   }
}
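
/* Assign consecutive driver locations to the variables in var_list, reusing
 * slots for component-packed varyings that share a location, and accumulate
 * the total slot count in *size.
 */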
static void
st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   const int base = stage == MESA_SHADER_FRAGMENT ?
      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;

   int UNUSED last_loc = 0;
   nir_foreach_variable(var, var_list) {

      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned var_size = type_size(type);

      /* Builtins don't allow component packing so we only need to worry about
       * user-defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;
         *size += type_size(type);

         /* An array may be packed such that it crosses multiple other arrays
          * or variables; we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < num_unallocated_slots; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   *size += location;
}

static int
st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
                              const char *name)
{
   int loc = _mesa_lookup_parameter_index(params, name);

   /* is there a better way to do this?  If we have something like:
    *
    *    struct S {
    *           float f;
    *           vec4 v;
    *    };
    *    uniform S color;
    *
    * Then what we get in prog->Parameters looks like:
    *
    *    0: Name=color.f, Type=6, DataType=1406, Size=1
    *    1: Name=color.v, Type=6, DataType=8b52, Size=4
    *
    * So the name doesn't match up and _mesa_lookup_parameter_index()
    * fails.  In this case just find the first matching "color.*"..
    *
    * Note for arrays you could end up w/ color[n].f, for example.
    *
    * glsl_to_tgsi works slightly differently in this regard.  It is
    * emitting something more low level, so it just translates the
    * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
    * it just calculates the additional offset of struct field members
    * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
    * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
    * needs to work backwards to get base var loc from the param-list
    * which already has them separated out.
    */
   if (loc < 0) {
      int namelen = strlen(name);
      for (unsigned i = 0; i < params->NumParameters; i++) {
         struct gl_program_parameter *p = &params->Parameters[i];
         if ((strncmp(p->Name, name, namelen) == 0) &&
             ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
            loc = i;
            break;
         }
      }
   }

   return loc;
}
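
/* Assign driver locations to uniform variables: samplers and images get
 * consecutive unit indices, while built-in and ordinary uniforms are looked
 * up (or added) in the program's parameter list.
 */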
static void
st_nir_assign_uniform_locations(struct gl_context *ctx,
                                struct gl_program *prog,
                                struct gl_shader_program *shader_program,
                                struct exec_list *uniform_list, unsigned *size)
{
   int max = 0;
   int shaderidx = 0;
   int imageidx = 0;

   nir_foreach_variable(uniform, uniform_list) {
      int loc;

      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms.
       */
      if ((uniform->data.mode == nir_var_uniform ||
           uniform->data.mode == nir_var_shader_storage) &&
          uniform->interface_type != NULL)
         continue;

      if (!uniform->data.bindless &&
          (uniform->type->is_sampler() || uniform->type->is_image())) {
         if (uniform->type->is_sampler())
            loc = shaderidx++;
         else
            loc = imageidx++;
      } else if (strncmp(uniform->name, "gl_", 3) == 0) {
         const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
         /* This state reference has already been set up by ir_to_mesa, but we'll
          * get the same index back here.
          */
         unsigned comps;
         const struct glsl_type *type = glsl_without_array(uniform->type);
         if (glsl_type_is_struct(type)) {
            comps = 4;
         } else {
            comps = glsl_get_vector_elements(type);
         }

         if (ctx->Const.PackedDriverUniformStorage) {
            loc = _mesa_add_sized_state_reference(prog->Parameters,
                                                  stateTokens, comps, false);
            loc = prog->Parameters->ParameterValueOffset[loc];
         } else {
            loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
         }
      } else {
         loc = st_nir_lookup_parameter_index(prog->Parameters, uniform->name);

         if (ctx->Const.PackedDriverUniformStorage) {
            loc = prog->Parameters->ParameterValueOffset[loc];
         }
      }

      uniform->data.driver_location = loc;

      max = MAX2(max, loc + type_size(uniform->type));
   }

   *size = max;
}
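
/* Standard NIR optimization loop; repeats until no pass reports further
 * progress.
 */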
void
st_nir_opts(nir_shader *nir)
{
   bool progress;
   do {
      progress = false;

      NIR_PASS_V(nir, nir_lower_vars_to_ssa);
      NIR_PASS_V(nir, nir_lower_alu_to_scalar);
      NIR_PASS_V(nir, nir_lower_phis_to_scalar);

      NIR_PASS_V(nir, nir_lower_pack);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      if (nir_opt_trivial_continues(nir)) {
         progress = true;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
      }
      NIR_PASS(progress, nir, nir_opt_if);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8);

      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
      }
   } while (progress);
}

/* First third of converting glsl_to_nir.. this leaves things in a pre-
 * nir_lower_io state, so that shader variants can more easily insert/
 * replace variables, etc.
 */
static nir_shader *
st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
               struct gl_shader_program *shader_program,
               gl_shader_stage stage)
{
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;

   nir_shader *nir = glsl_to_nir(shader_program, stage, options);

   /* Set the next shader stage hint for VS and TES. */
   if (!nir->info.separate_shader &&
       (nir->info.stage == MESA_SHADER_VERTEX ||
        nir->info.stage == MESA_SHADER_TESS_EVAL)) {

      unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
      unsigned stages_mask =
         ~prev_stages & shader_program->data->linked_stages;

      nir->info.next_stage = stages_mask ?
         (gl_shader_stage) ffs(stages_mask) : MESA_SHADER_FRAGMENT;
   } else {
      nir->info.next_stage = MESA_SHADER_FRAGMENT;
   }

   nir_variable_mode mask =
      (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
   nir_remove_dead_variables(nir, mask);

   if (options->lower_all_io_to_temps ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, true);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, false);
   }

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   return nir;
}

/* Second third of converting glsl_to_nir. This creates uniforms, gathers
 * info on varyings, etc after NIR link time opts have been applied.
 */
void
st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                         struct gl_shader_program *shader_program)
{
   nir_shader *nir = prog->nir;

   /* Make a pass over the IR to add state references for any built-in
    * uniforms that are used.  This has to be done now (during linking).
    * Code generation doesn't happen until the first time this shader is
    * used for rendering.  Waiting until then to generate the parameters is
    * too late.  At that point, the values for the built-in uniforms won't
    * get sent to the shader.
    */
   nir_foreach_variable(var, &nir->uniforms) {
      if (strncmp(var->name, "gl_", 3) == 0) {
         const nir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         const struct glsl_type *type = glsl_without_array(var->type);
         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            unsigned comps;
            if (glsl_type_is_struct(type)) {
               /* Builtin structs require special handling; for now we just
                * make all members vec4.  See st_nir_lower_builtin.
                */
               comps = 4;
            } else {
               comps = glsl_get_vector_elements(type);
            }

            if (st->ctx->Const.PackedDriverUniformStorage) {
               _mesa_add_sized_state_reference(prog->Parameters,
                                               slots[i].tokens, comps, false);
            } else {
               _mesa_add_state_reference(prog->Parameters, slots[i].tokens);
            }
         }
      }
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last.  Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(st->ctx, shader_program, prog, true);

   st_set_prog_affected_state_flags(prog);

   NIR_PASS_V(nir, st_nir_lower_builtin);
   NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);

   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("NIR IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(prog->info.stage),
                shader_program->Name);
      nir_print_shader(nir, _mesa_get_log_file());
   }
}

/* TODO any better helper somewhere to sort a list? */

static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}
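
/* Attach the freshly generated NIR to the stage-specific gallium program
 * struct and mark its IR type as NIR.
 */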
static void
set_st_program(struct gl_program *prog,
               struct gl_shader_program *shader_program,
               nir_shader *nir)
{
   struct st_vertex_program *stvp;
   struct st_common_program *stp;
   struct st_fragment_program *stfp;
   struct st_compute_program *stcp;

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      stvp = (struct st_vertex_program *)prog;
      stvp->shader_program = shader_program;
      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
      stvp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_GEOMETRY:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
      stp = (struct st_common_program *)prog;
      stp->shader_program = shader_program;
      stp->tgsi.type = PIPE_SHADER_IR_NIR;
      stp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_FRAGMENT:
      stfp = (struct st_fragment_program *)prog;
      stfp->shader_program = shader_program;
      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
      stfp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_COMPUTE:
      stcp = (struct st_compute_program *)prog;
      stcp->shader_program = shader_program;
      stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
      stcp->tgsi.prog = nir;
      break;
   default:
      unreachable("unknown shader stage");
   }
}
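
/* Build the parameter list and NIR for one linked shader stage and store
 * both on its gl_program.
 */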
static void
st_nir_get_mesa_program(struct gl_context *ctx,
                        struct gl_shader_program *shader_program,
                        struct gl_linked_shader *shader)
{
   struct st_context *st = st_context(ctx);
   struct gl_program *prog;

   validate_ir_tree(shader->ir);

   prog = shader->Program;
   prog->Parameters = _mesa_new_parameter_list();

   _mesa_copy_linked_program_data(shader_program, shader);
   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                               prog->Parameters);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("GLSL IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(shader->Stage),
                shader_program->Name);
      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
   }

   prog->ExternalSamplersUsed = gl_external_samplers(prog);
   _mesa_update_shader_textures_used(shader_program, prog);

   nir_shader *nir = st_glsl_to_nir(st, prog, shader_program, shader->Stage);

   set_st_program(prog, shader_program, nir);
   prog->nir = nir;
}
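
/* Cross-stage NIR linking for one producer/consumer pair: dead and unused
 * varyings are removed, and both shaders are re-optimized if anything
 * changed.
 */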
static void
st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
{
   nir_lower_io_arrays_to_elements(*producer, *consumer);

   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       *
       * TODO: radeonsi shouldn't need to do this, however LLVM isn't
       * currently smart enough to handle indirects without causing excess
       * spilling causing the gpu to hang.
       *
       * See the following thread for more details of the problem:
       * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
       */
      nir_variable_mode indirect_mask = nir_var_local;

      NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
      NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);

      st_nir_opts(*producer);
      st_nir_opts(*consumer);
   }
}
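
/* Link a GLSL shader program as NIR: convert each linked stage to NIR,
 * scalarize and optimize it, link the stages against each other from
 * fragment back to vertex, then run per-stage lowering and info gathering.
 */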
bool
st_link_nir(struct gl_context *ctx,
            struct gl_shader_program *shader_program)
{
   struct st_context *st = st_context(ctx);

   /* Determine first and last stage. */
   unsigned first = MESA_SHADER_STAGES;
   unsigned last = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!shader_program->_LinkedShaders[i])
         continue;
      if (first == MESA_SHADER_STAGES)
         first = i;
      last = i;
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_nir_get_mesa_program(ctx, shader_program, shader);

      nir_variable_mode mask = (nir_variable_mode) 0;
      if (i != first)
         mask = (nir_variable_mode)(mask | nir_var_shader_in);
      if (i != last)
         mask = (nir_variable_mode)(mask | nir_var_shader_out);

      nir_shader *nir = shader->Program->nir;
      NIR_PASS_V(nir, nir_lower_io_to_scalar_early, mask);
      st_nir_opts(nir);
   }

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage
    * are eliminated if they are (transitively) not used in a later
    * stage.
    */
   int next = last;
   for (int i = next - 1; i >= 0; i--) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_nir_link_shaders(&shader->Program->nir,
                          &shader_program->_LinkedShaders[next]->Program->nir);
      next = i;
   }

   int prev = -1;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      nir_shader *nir = shader->Program->nir;

      /* Fragment shaders may need the wpos_ytransform lowering: */
      if (nir->info.stage == MESA_SHADER_FRAGMENT) {
         static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
            STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
         };
         nir_lower_wpos_ytransform_options wpos_options = { { 0 } };
         struct pipe_screen *pscreen = st->pipe->screen;

         memcpy(wpos_options.state_tokens, wposTransformState,
                sizeof(wpos_options.state_tokens));
         wpos_options.fs_coord_origin_upper_left =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
         wpos_options.fs_coord_origin_lower_left =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         wpos_options.fs_coord_pixel_center_integer =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
         wpos_options.fs_coord_pixel_center_half_integer =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);

         if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
            nir_validate_shader(nir);
            _mesa_add_state_reference(shader->Program->Parameters,
                                      wposTransformState);
         }
      }

      NIR_PASS_V(nir, nir_lower_system_values);

      nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
      shader->Program->info = nir->info;

      if (prev != -1) {
         nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
                              nir, ctx->API != API_OPENGL_COMPAT);
      }
      prev = i;
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_glsl_to_nir_post_opts(st, shader->Program, shader_program);

      assert(shader->Program);
      if (!ctx->Driver.ProgramStringNotify(ctx,
                                           _mesa_shader_stage_to_program(i),
                                           shader->Program)) {
         _mesa_reference_program(ctx, &shader->Program, NULL);
         return false;
      }
   }

   return true;
}

/* Last third of preparing nir from glsl, which happens after shader
 * variant lowering.
 */
void
st_finalize_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program, nir_shader *nir)
{
   struct pipe_screen *screen = st->pipe->screen;
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   if (options->lower_all_io_to_temps ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
   }

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      /* Needs special handling so drvloc matches the vbo state: */
      st_nir_assign_vs_in_locations(prog, nir);
      /* Re-lower global vars, to deal with any dead VS inputs. */
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      st_nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      /* nothing to assign for compute */
   } else {
      unreachable("invalid shader type for tgsi bypass\n");
   }

   NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
              st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);

   st_nir_assign_uniform_locations(st->ctx, prog, shader_program,
                                   &nir->uniforms, &nir->num_uniforms);

   if (st->ctx->Const.PackedDriverUniformStorage) {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_type_dword_size,
                 (nir_lower_io_options)0);
      NIR_PASS_V(nir, st_nir_lower_uniforms_to_ubo);
   }

   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
   else
      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
}