/*
 * Copyright © 2015 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_context.h"

#include "program/program.h"
#include "program/prog_statevars.h"
#include "program/prog_parameter.h"
#include "program/ir_to_mesa.h"
#include "main/context.h"
#include "main/mtypes.h"
#include "main/errors.h"
#include "main/glspirv.h"
#include "main/shaderapi.h"
#include "main/uniforms.h"

#include "main/shaderobj.h"
#include "st_context.h"
#include "st_program.h"
#include "st_shader_cache.h"

#include "compiler/nir/nir.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/gl_nir_linker.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/string_to_uint_map.h"

static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}

/* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
 * may need to fix up varying slots so the glsl->nir path is aligned
 * with the anything->tgsi->nir path.
 */
static void
st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
{
   if (st->needs_texcoord_semantic)
      return;

   nir_foreach_variable(var, var_list) {
      if (var->data.location >= VARYING_SLOT_VAR0) {
         var->data.location += 9;
      } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
               (var->data.location <= VARYING_SLOT_TEX7)) {
         var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
      }
   }
}
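
/* Illustrative sketch of the remap above: with needs_texcoord_semantic
 * false, VARYING_SLOT_TEXn is moved onto VARYING_SLOT_VARn, while a
 * pre-existing VARYING_SLOT_VARm shifts to VARYING_SLOT_VAR(m+9) --
 * presumably so the eight TEXn slots (plus point coord) land ahead of the
 * user varyings, matching the generic-semantic numbering the tgsi path
 * produces.
 */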

/* input location assignment for VS inputs must be handled specially, so
 * that it is aligned w/ st's vbo state.
 * (This isn't the case with, for ex, FS inputs, which only need to agree
 * on varying-slot w/ the VS outputs)
 */
void
st_nir_assign_vs_in_locations(struct nir_shader *nir)
{
   if (nir->info.stage != MESA_SHADER_VERTEX)
      return;

   bool removed_inputs = false;

   nir->num_inputs = util_bitcount64(nir->info.inputs_read);
   nir_foreach_variable_safe(var, &nir->inputs) {
      /* NIR already assigns dual-slot inputs to two locations so all we have
       * to do is compact everything down.
       */
      if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
         var->data.driver_location =
            util_bitcount64(nir->info.inputs_read &
                            BITFIELD64_MASK(var->data.location));
      } else {
         /* Move unused input variables to the globals list (with no
          * initialization), to avoid confusing drivers looking through the
          * inputs array and expecting to find inputs with a driver_location
          * set.
          */
         exec_node_remove(&var->node);
         var->data.mode = nir_var_shader_temp;
         exec_list_push_tail(&nir->globals, &var->node);
         removed_inputs = true;
      }
   }

   /* Re-lower global vars, to deal with any dead VS inputs. */
   if (removed_inputs)
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
}
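
/* Worked example of the compaction above: if inputs_read == 0b1011, the
 * variables at locations 0, 1 and 3 receive driver_location 0, 1 and 2
 * respectively -- each is the popcount of the read slots below it -- and
 * the unread variable at location 2 is demoted to a shader temporary.
 */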

static int
st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
{
   struct gl_program_parameter_list *params = prog->Parameters;

   /* Look up the first parameter whose uniform storage index matches the
    * variable's location.
    */
   for (unsigned i = 0; i < params->NumParameters; i++) {
      int index = params->Parameters[i].MainUniformStorageIndex;
      if (index == var->data.location)
         return i;
   }

   /* TODO: Handle this fallback for SPIR-V.  We need this for GLSL e.g. in
    * dEQP-GLES2.functional.uniform_api.random.3
    */

   /* is there a better way to do this?  If we have something like:
    *
    *    struct S {
    *       float f;
    *       vec4 v;
    *    };
    *    uniform S color;
    *
    * Then what we get in prog->Parameters looks like:
    *
    *    0: Name=color.f, Type=6, DataType=1406, Size=1
    *    1: Name=color.v, Type=6, DataType=8b52, Size=4
    *
    * So the name doesn't match up and _mesa_lookup_parameter_index()
    * fails.  In this case just find the first matching "color.*"..
    *
    * Note for arrays you could end up w/ color[n].f, for example.
    *
    * glsl_to_tgsi works slightly differently in this regard.  It is
    * emitting something more low level, so it just translates the
    * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
    * it just calculates the additional offset of struct field members
    * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
    * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
    * needs to work backwards to get base var loc from the param-list
    * which already has them separated out.
    */
   if (!prog->sh.data->spirv) {
      int namelen = strlen(var->name);
      for (unsigned i = 0; i < params->NumParameters; i++) {
         struct gl_program_parameter *p = &params->Parameters[i];
         if ((strncmp(p->Name, var->name, namelen) == 0) &&
             ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
            return i;
         }
      }
   }

   return -1;
}

static void
st_nir_assign_uniform_locations(struct gl_context *ctx,
                                struct gl_program *prog,
                                struct exec_list *uniform_list)
{
   int shaderidx = 0;
   int imageidx = 0;

   nir_foreach_variable(uniform, uniform_list) {
      int loc;

      /*
       * UBO's have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (uniform->data.mode == nir_var_mem_ubo || uniform->data.mode == nir_var_mem_ssbo)
         continue;

      const struct glsl_type *type = glsl_without_array(uniform->type);
      if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
         if (type->is_sampler()) {
            loc = shaderidx;
            shaderidx += type_size(uniform->type);
         } else {
            loc = imageidx;
            imageidx += type_size(uniform->type);
         }
      } else if (uniform->state_slots) {
         const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
         /* This state reference has already been setup by ir_to_mesa, but we'll
          * get the same index back here.
          */

         unsigned comps;
         if (glsl_type_is_struct_or_ifc(type)) {
            comps = 4;
         } else {
            comps = glsl_get_vector_elements(type);
         }

         if (ctx->Const.PackedDriverUniformStorage) {
            loc = _mesa_add_sized_state_reference(prog->Parameters,
                                                  stateTokens, comps, false);
            loc = prog->Parameters->ParameterValueOffset[loc];
         } else {
            loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
         }
      } else {
         loc = st_nir_lookup_parameter_index(prog, uniform);

         /* We need to check that loc is not -1 here before accessing the
          * array. It can be negative for example when we have a struct that
          * only contains opaque types.
          */
         if (loc >= 0 && ctx->Const.PackedDriverUniformStorage) {
            loc = prog->Parameters->ParameterValueOffset[loc];
         }
      }

      uniform->data.driver_location = loc;
   }
}
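
/* Illustrative: since type_size() counts attribute slots, an array such as
 * `uniform sampler2D tex[4]` takes loc = shaderidx and then advances
 * shaderidx by 4, so samplers and images each get a densely packed index
 * space of their own.
 */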

void
st_nir_opts(nir_shader *nir)
{
   bool progress;

   do {
      progress = false;

      NIR_PASS_V(nir, nir_lower_vars_to_ssa);

      /* Linking deals with unused inputs/outputs, but here we can remove
       * things local to the shader in the hopes that we can cleanup other
       * things. This pass will also remove variables with only stores, so we
       * might be able to make progress after it.
       */
      NIR_PASS(progress, nir, nir_remove_dead_variables,
               (nir_variable_mode)(nir_var_function_temp |
                                   nir_var_shader_temp |
                                   nir_var_mem_shared));

      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
      NIR_PASS(progress, nir, nir_opt_dead_write_vars);

      if (nir->options->lower_to_scalar) {
         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
      }

      NIR_PASS_V(nir, nir_lower_alu);
      NIR_PASS_V(nir, nir_lower_pack);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      if (nir_opt_trivial_continues(nir)) {
         progress = true;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
      }
      NIR_PASS(progress, nir, nir_opt_if, false);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (!nir->info.flrp_lowered) {
         unsigned lower_flrp =
            (nir->options->lower_flrp16 ? 16 : 0) |
            (nir->options->lower_flrp32 ? 32 : 0) |
            (nir->options->lower_flrp64 ? 64 : 0);

         if (lower_flrp) {
            bool lower_flrp_progress = false;

            NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
                     lower_flrp,
                     false /* always_precise */,
                     nir->options->lower_ffma);
            if (lower_flrp_progress) {
               NIR_PASS(progress, nir,
                        nir_opt_constant_folding);
               progress = true;
            }
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         nir->info.flrp_lowered = true;
      }

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
      }
   } while (progress);
}

static void
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size * (length == 3 ? 4 : length);
}
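
/* Worked example: a vec3 of 32-bit floats has comp_size = 4 and length = 3,
 * giving *size = 12 but *align = 16 -- three-component vectors are padded
 * out to four-component alignment, std430-style.
 */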

/* First third of converting glsl_to_nir.. this leaves things in a pre-
 * nir_lower_io state, so that shader variants can more easily insert/
 * replace variables, etc.
 */
static void
st_nir_preprocess(struct st_context *st, struct gl_program *prog,
                  struct gl_shader_program *shader_program,
                  gl_shader_stage stage)
{
   struct pipe_screen *screen = st->pipe->screen;
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
   nir_shader *nir = prog->nir;

   /* Set the next shader stage hint for VS and TES. */
   if (!nir->info.separate_shader &&
       (nir->info.stage == MESA_SHADER_VERTEX ||
        nir->info.stage == MESA_SHADER_TESS_EVAL)) {

      unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
      unsigned stages_mask =
         ~prev_stages & shader_program->data->linked_stages;

      nir->info.next_stage = stages_mask ?
         (gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
   } else {
      nir->info.next_stage = MESA_SHADER_FRAGMENT;
   }
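
   /* Worked example of the hint above: for the VS (stage 0) of a VS+FS
    * program, prev_stages = 0b00001 and linked_stages = 0b10001, so
    * stages_mask = 0b10000 and u_bit_scan() yields MESA_SHADER_FRAGMENT (4);
    * add a GS and the lowest remaining bit is MESA_SHADER_GEOMETRY (3)
    * instead.
    */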

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
       (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
      st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
   }

   /* ES has strict SSO validation rules for shader IO matching so we can't
    * remove dead IO until the resource list has been built. Here we skip
    * removing them until later. This will potentially make the IO lowering
    * calls below do a little extra work but should otherwise have no impact.
    */
   if (!_mesa_is_gles(st->ctx) || !nir->info.separate_shader) {
      nir_variable_mode mask =
         (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
      nir_remove_dead_variables(nir, mask);
   }

   if (options->lower_all_io_to_temps ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, true);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
              !screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS)) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, false);
   }

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   if (options->lower_to_scalar) {
      NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   }

   /* before buffers and vars_to_ssa */
   NIR_PASS_V(nir, gl_nir_lower_bindless_images);

   /* TODO: Change GLSL to not lower shared memory. */
   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
       shader_program->data->spirv) {
      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_type_info);
      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   /* Do a round of constant folding to clean up address calculations */
   NIR_PASS_V(nir, nir_opt_constant_folding);
}

/* Second third of converting glsl_to_nir. This creates uniforms, gathers
 * info on varyings, etc after NIR link time opts have been applied.
 */
static void
st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                         struct gl_shader_program *shader_program)
{
   nir_shader *nir = prog->nir;

   /* Make a pass over the IR to add state references for any built-in
    * uniforms that are used.  This has to be done now (during linking).
    * Code generation doesn't happen until the first time this shader is
    * used for rendering.  Waiting until then to generate the parameters is
    * too late.  At that point, the values for the built-in uniforms won't
    * get sent to the shader.
    */
   nir_foreach_variable(var, &nir->uniforms) {
      const nir_state_slot *const slots = var->state_slots;
      if (slots != NULL) {
         const struct glsl_type *type = glsl_without_array(var->type);
         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            unsigned comps;
            if (glsl_type_is_struct_or_ifc(type)) {
               /* Builtin structs require special handling; for now we just
                * make all members vec4.  See st_nir_lower_builtin.
                */
               comps = 4;
            } else {
               comps = glsl_get_vector_elements(type);
            }

            if (st->ctx->Const.PackedDriverUniformStorage) {
               _mesa_add_sized_state_reference(prog->Parameters,
                                               slots[i].tokens,
                                               comps, false);
            } else {
               _mesa_add_state_reference(prog->Parameters,
                                         slots[i].tokens);
            }
         }
      }
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last.  Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(st->ctx, shader_program, prog);

   st_set_prog_affected_state_flags(prog);

   /* None of the builtins being lowered here can be produced by SPIR-V.  See
    * _mesa_builtin_uniform_desc.
    */
   if (!shader_program->data->spirv)
      NIR_PASS_V(nir, st_nir_lower_builtin);

   NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
   NIR_PASS_V(nir, nir_opt_intrinsics);

   /* Lower 64-bit ops. */
   if (nir->options->lower_int64_options ||
       nir->options->lower_doubles_options) {
      bool lowered_64bit_ops = false;
      if (nir->options->lower_doubles_options) {
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
                  st->ctx->SoftFP64, nir->options->lower_doubles_options);
      }
      if (nir->options->lower_int64_options) {
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
                  nir->options->lower_int64_options);
      }

      if (lowered_64bit_ops)
         st_nir_opts(nir);
   }

   nir_variable_mode mask = (nir_variable_mode)
      (nir_var_shader_in | nir_var_shader_out | nir_var_function_temp);
   nir_remove_dead_variables(nir, mask);

   if (!st->has_hw_atomics)
      NIR_PASS_V(nir, nir_lower_atomics_to_ssbo);

   st_finalize_nir_before_variants(nir);

   if (st->allow_st_finalize_nir_twice)
      st_finalize_nir(st, prog, shader_program, nir, true);

   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("NIR IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(prog->info.stage),
                shader_program->Name);
      nir_print_shader(nir, _mesa_get_log_file());
      _mesa_log("\n\n");
   }
}

static void
st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
{
   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if ((producer)->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  We only support these for TCS outputs, so for other
       * stages, we need to call nir_lower_io_to_temporaries to get rid of
       * them.  This, in turn, creates temporary variables and extra
       * copy_deref intrinsics that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }
}

static void
st_nir_link_shaders(nir_shader *producer, nir_shader *consumer)
{
   if (producer->options->lower_to_scalar) {
      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
   }

   nir_lower_io_arrays_to_elements(producer, consumer);

   st_nir_opts(producer);
   st_nir_opts(consumer);

   if (nir_link_opt_varyings(producer, consumer))
      st_nir_opts(consumer);

   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(producer, consumer)) {
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

      st_nir_opts(producer);
      st_nir_opts(consumer);

      /* Optimizations can cause varyings to become unused.
       * nir_compact_varyings() depends on all dead varyings being removed so
       * we need to call nir_remove_dead_variables() again here.
       */
      NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
   }
}

static void
st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
{
   struct gl_linked_shader *linked_tcs =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
   struct gl_linked_shader *linked_tes =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];

   /* If we have a TCS and TES linked together, lower TES patch vertices. */
   if (linked_tcs && linked_tes) {
      nir_shader *tcs_nir = linked_tcs->Program->nir;
      nir_shader *tes_nir = linked_tes->Program->nir;

      /* The TES input vertex count is the TCS output vertex count,
       * lower TES gl_PatchVerticesIn to a constant.
       */
      uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
      NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
   }
}
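
/* Illustrative: a TCS declaring `layout(vertices = 3) out;` sets
 * tcs_vertices_out to 3, so reads of gl_PatchVerticesIn in the linked TES
 * fold to the constant 3 here instead of remaining a system value.
 */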

static void
st_nir_lower_wpos_ytransform(struct nir_shader *nir,
                             struct gl_program *prog,
                             struct pipe_screen *pscreen)
{
   if (nir->info.stage != MESA_SHADER_FRAGMENT)
      return;

   static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
      STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
   };
   nir_lower_wpos_ytransform_options wpos_options = { { 0 } };

   memcpy(wpos_options.state_tokens, wposTransformState,
          sizeof(wpos_options.state_tokens));
   wpos_options.fs_coord_origin_upper_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
   wpos_options.fs_coord_origin_lower_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
   wpos_options.fs_coord_pixel_center_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
   wpos_options.fs_coord_pixel_center_half_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);

   if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
      nir_validate_shader(nir, "after nir_lower_wpos_ytransform");
      _mesa_add_state_reference(prog->Parameters, wposTransformState);
   }
}

bool
st_link_nir(struct gl_context *ctx,
            struct gl_shader_program *shader_program)
{
   struct st_context *st = st_context(ctx);
   struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
   unsigned num_shaders = 0;

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (shader_program->_LinkedShaders[i])
         linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      const nir_shader_compiler_options *options =
         st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
      struct gl_program *prog = shader->Program;
      struct st_program *stp = (struct st_program *)prog;

      _mesa_copy_linked_program_data(shader_program, shader);

      stp->shader_program = shader_program;
      stp->state.type = PIPE_SHADER_IR_NIR;

      if (shader_program->data->spirv) {
         prog->Parameters = _mesa_new_parameter_list();
         /* Parameters will be filled during NIR linking. */

         prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
      } else {
         validate_ir_tree(shader->ir);

         prog->Parameters = _mesa_new_parameter_list();
         _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                                     prog->Parameters);

         if (ctx->_Shader->Flags & GLSL_DUMP) {
            _mesa_log("\n");
            _mesa_log("GLSL IR for linked %s program %d:\n",
                      _mesa_shader_stage_to_string(shader->Stage),
                      shader_program->Name);
            _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
            _mesa_log("\n\n");
         }

         prog->ExternalSamplersUsed = gl_external_samplers(prog);
         _mesa_update_shader_textures_used(shader_program, prog);

         prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
         st_nir_preprocess(st, prog, shader_program, shader->Stage);
      }

      if (options->lower_to_scalar) {
         NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
      }
   }

   st_lower_patch_vertices_in(shader_program);

   /* For SPIR-V, we have to perform the NIR linking before applying
    * st_nir_preprocess.
    */
   if (shader_program->data->spirv) {
      static const gl_nir_linker_options opts = {
         true /*fill_parameters */
      };
      if (!gl_nir_link_spirv(ctx, shader_program, &opts))
         return false;

      nir_build_program_resource_list(ctx, shader_program, true);

      for (unsigned i = 0; i < num_shaders; i++) {
         struct gl_linked_shader *shader = linked_shader[i];
         struct gl_program *prog = shader->Program;

         prog->ExternalSamplersUsed = gl_external_samplers(prog);
         _mesa_update_shader_textures_used(shader_program, prog);
         st_nir_preprocess(st, prog, shader_program, shader->Stage);
      }
   }

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage
    * are eliminated if they are (transitively) not used in a later
    * stage.
    */
   for (int i = num_shaders - 2; i >= 0; i--) {
      st_nir_link_shaders(linked_shader[i]->Program->nir,
                          linked_shader[i + 1]->Program->nir);
   }
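
   /* Worked example: for a VS+GS+FS program, num_shaders == 3, so the loop
    * links (GS, FS) first and then (VS, GS); a VS output that the GS only
    * passed through to an FS input that is never read gets removed from
    * both stages.
    */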

   /* Linking shaders also optimizes them. Separate shaders, compute shaders
    * and shaders with a fixed-func VS or FS that don't need linking are
    * optimized here.
    */
   if (num_shaders == 1)
      st_nir_opts(linked_shader[0]->Program->nir);

   if (!shader_program->data->spirv) {
      if (!gl_nir_link_glsl(ctx, shader_program))
         return false;

      nir_build_program_resource_list(ctx, shader_program, false);
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      nir_shader *nir = shader->Program->nir;

      /* This needs to run after the initial pass of nir_lower_vars_to_ssa, so
       * that the buffer indices are constants in nir where they were
       * constants in GLSL. */
      NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);

      /* Remap the locations to slots so those requiring two slots will occupy
       * two locations. For instance, if we have in the IR code a dvec3 attr0 in
       * location 0 and vec4 attr1 in location 1, in NIR attr0 will use
       * locations/slots 0 and 1, and attr1 will use location/slot 2
       */
      if (nir->info.stage == MESA_SHADER_VERTEX && !shader_program->data->spirv)
         nir_remap_dual_slot_attributes(nir, &shader->Program->DualSlotInputs);

      NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
                 st->pipe->screen);

      NIR_PASS_V(nir, nir_lower_system_values);
      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

      nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
      shader->Program->info = nir->info;
      if (shader->Stage == MESA_SHADER_VERTEX) {
         /* NIR expands dual-slot inputs out to two locations.  We need to
          * compact things back down GL-style single-slot inputs to avoid
          * confusing the state tracker.
          */
         shader->Program->info.inputs_read =
            nir_get_single_slot_attribs_mask(nir->info.inputs_read,
                                             shader->Program->DualSlotInputs);
      }

      if (i >= 1) {
         struct gl_program *prev_shader = linked_shader[i - 1]->Program;

         /* We can't use nir_compact_varyings with transform feedback, since
          * the pipe_stream_output->output_register field is based on the
          * pre-compacted driver_locations.
          */
         if (!(prev_shader->sh.LinkedTransformFeedback &&
               prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
            nir_compact_varyings(prev_shader->nir,
                                 nir, ctx->API != API_OPENGL_COMPAT);

         if (ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->vectorize_io)
            st_nir_vectorize_io(prev_shader->nir, nir);
      }
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      struct gl_program *prog = shader->Program;
      struct st_program *stp = st_program(prog);
      st_glsl_to_nir_post_opts(st, prog, shader_program);

      /* Initialize st_vertex_program members. */
      if (shader->Stage == MESA_SHADER_VERTEX)
         st_prepare_vertex_program(stp);

      /* Get pipe_stream_output_info. */
      if (shader->Stage == MESA_SHADER_VERTEX ||
          shader->Stage == MESA_SHADER_TESS_EVAL ||
          shader->Stage == MESA_SHADER_GEOMETRY)
         st_translate_stream_output_info(prog);

      st_store_ir_in_disk_cache(st, prog, true);

      st_release_variants(st, stp);
      st_finalize_program(st, prog);

      /* The GLSL IR won't be needed anymore. */
      ralloc_free(shader->ir);
      shader->ir = NULL;
   }

   struct shader_info *prev_info = NULL;

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (!shader)
         continue;

      struct shader_info *info = &shader->Program->nir->info;

      if (prev_info &&
          ctx->Const.ShaderCompilerOptions[i].NirOptions->unify_interfaces) {
         prev_info->outputs_written |= info->inputs_read &
            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
         info->inputs_read |= prev_info->outputs_written &
            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);

         prev_info->patch_outputs_written |= info->patch_inputs_read;
         info->patch_inputs_read |= prev_info->patch_outputs_written;
      }
      prev_info = info;
   }

   return true;
}

void
st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
{
   if (nir->info.stage == MESA_SHADER_VERTEX) {
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);

      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
      /* No varyings in compute shaders. */
   } else {
      unreachable("invalid shader type");
   }
}

static void
st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
                      struct gl_shader_program *shader_program,
                      struct gl_program *prog)
{
   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
   else
      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);

   if (prog) {
      prog->info.textures_used = nir->info.textures_used;
      prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
   }
}

static int
st_packed_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_dword_slots(type, bindless);
}

static int
st_unpacked_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_vec4_slots(type, false, bindless);
}

void
st_nir_lower_uniforms(struct st_context *st, nir_shader *nir)
{
   if (st->ctx->Const.PackedDriverUniformStorage) {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
                 st_packed_uniforms_type_size,
                 (nir_lower_io_options)0);
      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
   } else {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
                 st_unpacked_uniforms_type_size,
                 (nir_lower_io_options)0);
   }
}
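
/* Illustrative size difference between the two callbacks above: for a
 * `uniform vec3`, st_packed_uniforms_type_size() counts 3 dword slots while
 * st_unpacked_uniforms_type_size() counts 1 vec4 slot, so packed storage
 * lays uniforms out back to back before they are turned into UBO loads.
 */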

/* Last third of preparing nir from glsl, which happens after shader
 * variant lowering.
 */
void
st_finalize_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program,
                nir_shader *nir, bool finalize_by_driver)
{
   struct pipe_screen *screen = st->pipe->screen;

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   st_nir_assign_varying_locations(st, nir);
   st_nir_assign_uniform_locations(st->ctx, prog,
                                   &nir->uniforms);

   /* Set num_uniforms in number of attribute slots (vec4s) */
   nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
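   /* e.g. 10 parameter values round up to DIV_ROUND_UP(10, 4) == 3 vec4
    * slots.
    */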

   st_nir_lower_uniforms(st, nir);
   st_nir_lower_samplers(screen, nir, shader_program, prog);

   if (finalize_by_driver && screen->finalize_nir)
      screen->finalize_nir(screen, nir, false);
}