/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "ac_nir_to_llvm.h"
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_builder.h"
28 #include "compiler/nir/nir_deref.h"
29 #include "compiler/nir_types.h"
31 #include "si_shader_internal.h"
32 #include "tgsi/tgsi_from_mesa.h"

static const nir_deref_instr *tex_get_texture_deref(nir_tex_instr *instr)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         return nir_src_as_deref(instr->src[i].src);
      default:
         break;
      }
   }

   return NULL;
}

static void scan_io_usage(struct si_shader_info *info, nir_intrinsic_instr *intr,
                          bool is_input)
{
   unsigned interp = INTERP_MODE_FLAT; /* load_input uses flat shading */

   if (intr->intrinsic == nir_intrinsic_load_interpolated_input) {
      nir_intrinsic_instr *baryc = nir_instr_as_intrinsic(intr->src[0].ssa->parent_instr);

      if (baryc) {
         if (nir_intrinsic_infos[baryc->intrinsic].index_map[NIR_INTRINSIC_INTERP_MODE] > 0)
            interp = nir_intrinsic_interp_mode(baryc);
         else
            unreachable("unknown barycentric intrinsic");
      } else {
         unreachable("unknown barycentric expression");
      }
   }

   unsigned mask, bit_size;
   bool dual_slot, is_output_load;

   if (nir_intrinsic_infos[intr->intrinsic].index_map[NIR_INTRINSIC_WRMASK] > 0) {
      mask = nir_intrinsic_write_mask(intr); /* store */
      bit_size = nir_src_bit_size(intr->src[0]);
      dual_slot = bit_size == 64 && nir_src_num_components(intr->src[0]) >= 3;
      is_output_load = false;
   } else {
      mask = nir_ssa_def_components_read(&intr->dest.ssa); /* load */
      bit_size = intr->dest.ssa.bit_size;
      dual_slot = bit_size == 64 && intr->dest.ssa.num_components >= 3;
      is_output_load = !is_input;
   }

   /* Convert the 64-bit component mask to a 32-bit component mask. */
   if (bit_size == 64) {
      unsigned new_mask = 0;
      for (unsigned i = 0; i < 4; i++) {
         if (mask & (1 << i))
            new_mask |= 0x3 << (2 * i);
      }
      mask = new_mask;
   }
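   /* E.g. a dvec2 store has 64-bit mask 0b11; each double occupies two 32-bit
    * components, so bit i expands to bits 2i and 2i+1, giving 0b1111. */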

   /* Convert the 16-bit component mask to a 32-bit component mask. */
   if (bit_size == 16) {
      unsigned new_mask = 0;
      for (unsigned i = 0; i < 4; i++) {
         if (mask & (1 << i))
            new_mask |= 0x1 << (i / 2);
      }
      mask = new_mask;
   }
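   /* E.g. reading .zw of a 16-bit vec4 gives mask 0b1100; 16-bit components
    * are packed two per 32-bit component, so bits 2-3 collapse to bit 1. */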

   mask <<= nir_intrinsic_component(intr);

   nir_src offset = *nir_get_io_offset_src(intr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);

   unsigned semantic = 0;
   /* VS doesn't have semantics. */
   if (info->stage != MESA_SHADER_VERTEX || !is_input)
      semantic = nir_intrinsic_io_semantics(intr).location;

   if (info->stage == MESA_SHADER_FRAGMENT && !is_input) {
      /* Never use FRAG_RESULT_COLOR directly. */
      if (semantic == FRAG_RESULT_COLOR) {
         semantic = FRAG_RESULT_DATA0;
         info->color0_writes_all_cbufs = true;
      }
      semantic += nir_intrinsic_io_semantics(intr).dual_source_blend_index;
   }

   unsigned driver_location = nir_intrinsic_base(intr);
   unsigned num_slots = indirect ? nir_intrinsic_io_semantics(intr).num_slots : (1 + dual_slot);
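   /* A 64-bit dvec3/dvec4 (dual_slot) occupies two vec4 slots, hence the
    * 1 + dual_slot; indirect accesses conservatively cover the whole
    * declared range (io_semantics.num_slots). */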

   if (is_input) {
      assert(driver_location + num_slots <= ARRAY_SIZE(info->input_usage_mask));

      for (unsigned i = 0; i < num_slots; i++) {
         unsigned loc = driver_location + i;
         unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;

         info->input_semantic[loc] = semantic + i;
         info->input_interpolate[loc] = interp;

         if (slot_mask) {
            info->input_usage_mask[loc] |= slot_mask;
            info->num_inputs = MAX2(info->num_inputs, loc + 1);

            if (semantic == VARYING_SLOT_PRIMITIVE_ID)
               info->uses_primid = true;
         }
      }
   } else {
      assert(driver_location + num_slots <= ARRAY_SIZE(info->output_usagemask));
      assert(semantic + num_slots < ARRAY_SIZE(info->output_semantic_to_slot));

      for (unsigned i = 0; i < num_slots; i++) {
         unsigned loc = driver_location + i;
         unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;

         info->output_semantic[loc] = semantic + i;
         info->output_semantic_to_slot[semantic + i] = loc;

         if (is_output_load) {
            /* Output loads have only a few things that we need to track. */
            info->output_readmask[loc] |= slot_mask;

            if (info->stage == MESA_SHADER_FRAGMENT &&
                nir_intrinsic_io_semantics(intr).fb_fetch_output)
               info->uses_fbfetch = true;
         } else if (slot_mask) {
            /* Output stores. */
            if (info->stage == MESA_SHADER_GEOMETRY) {
               unsigned gs_streams = (uint32_t)nir_intrinsic_io_semantics(intr).gs_streams <<
                                     (nir_intrinsic_component(intr) * 2);
               unsigned new_mask = slot_mask & ~info->output_usagemask[loc];

               for (unsigned i = 0; i < 4; i++) {
                  unsigned stream = (gs_streams >> (i * 2)) & 0x3;

                  if (new_mask & (1 << i)) {
                     info->output_streams[loc] |= stream << (i * 2);
                     info->num_stream_output_components[stream]++;
                  }
               }
            }
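            /* gs_streams packs a 2-bit stream index per output component
             * (component i's stream sits in bits 2i..2i+1), which is what
             * the decoding loop above unpacks. */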

            info->output_usagemask[loc] |= slot_mask;
            info->num_outputs = MAX2(info->num_outputs, loc + 1);

            if (info->stage == MESA_SHADER_FRAGMENT) {
               switch (semantic) {
               case FRAG_RESULT_DEPTH:
                  info->writes_z = true;
                  break;
               case FRAG_RESULT_STENCIL:
                  info->writes_stencil = true;
                  break;
               case FRAG_RESULT_SAMPLE_MASK:
                  info->writes_samplemask = true;
                  break;
               }

               if (semantic >= FRAG_RESULT_DATA0 && semantic <= FRAG_RESULT_DATA7) {
                  unsigned index = semantic - FRAG_RESULT_DATA0;
                  info->colors_written |= 1 << (index + i);
               }
            } else {
               switch (semantic) {
               case VARYING_SLOT_PRIMITIVE_ID:
                  info->writes_primid = true;
                  break;
               case VARYING_SLOT_VIEWPORT:
                  info->writes_viewport_index = true;
                  break;
               case VARYING_SLOT_LAYER:
                  info->writes_layer = true;
                  break;
               case VARYING_SLOT_PSIZ:
                  info->writes_psize = true;
                  break;
               case VARYING_SLOT_CLIP_VERTEX:
                  info->writes_clipvertex = true;
                  break;
               case VARYING_SLOT_EDGE:
                  info->writes_edgeflag = true;
                  break;
               case VARYING_SLOT_POS:
                  info->writes_position = true;
                  break;
               }
            }
         }
      }
   }
}

static void scan_instruction(const struct nir_shader *nir, struct si_shader_info *info,
                             nir_instr *instr)
{
   if (instr->type == nir_instr_type_alu) {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_fddx:
      case nir_op_fddy:
      case nir_op_fddx_fine:
      case nir_op_fddy_fine:
      case nir_op_fddx_coarse:
      case nir_op_fddy_coarse:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_tex) {
      nir_tex_instr *tex = nir_instr_as_tex(instr);
      const nir_deref_instr *deref = tex_get_texture_deref(tex);
      nir_variable *var = deref ? nir_deref_instr_get_variable(deref) : NULL;

      if (var) {
         if (deref->mode != nir_var_uniform || var->data.bindless)
            info->uses_bindless_samplers = true;
      }

      switch (tex->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         info->uses_derivatives = true;
         break;
      default:
         break;
      }
   } else if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_front_face:
         info->uses_frontface = 1;
         break;
      case nir_intrinsic_load_instance_id:
         info->uses_instanceid = 1;
         break;
      case nir_intrinsic_load_invocation_id:
         info->uses_invocationid = true;
         break;
      case nir_intrinsic_load_num_work_groups:
         info->uses_grid_size = true;
         break;
      case nir_intrinsic_load_local_invocation_index:
      case nir_intrinsic_load_subgroup_id:
      case nir_intrinsic_load_num_subgroups:
         info->uses_subgroup_info = true;
         break;
      case nir_intrinsic_load_local_group_size:
         /* The block size is translated to IMM with a fixed block size. */
         if (info->base.cs.local_size[0] == 0)
            info->uses_block_size = true;
         break;
      case nir_intrinsic_load_local_invocation_id:
      case nir_intrinsic_load_work_group_id: {
         unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
         while (mask) {
            unsigned i = u_bit_scan(&mask);

            if (intr->intrinsic == nir_intrinsic_load_work_group_id)
               info->uses_block_id[i] = true;
            else
               info->uses_thread_id[i] = true;
         }
         break;
      }
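      /* u_bit_scan() pops the lowest set bit of the mask and returns its
       * index, so the loop above visits each read component (x, y, z). */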
      case nir_intrinsic_load_draw_id:
         info->uses_drawid = 1;
         break;
      case nir_intrinsic_load_primitive_id:
         info->uses_primid = 1;
         break;
      case nir_intrinsic_load_sample_mask_in:
         info->reads_samplemask = true;
         break;
      case nir_intrinsic_load_tess_level_inner:
      case nir_intrinsic_load_tess_level_outer:
         info->reads_tess_factors = true;
         break;
      case nir_intrinsic_bindless_image_load:
      case nir_intrinsic_bindless_image_size:
      case nir_intrinsic_bindless_image_samples:
         info->uses_bindless_images = true;
         break;
      case nir_intrinsic_bindless_image_store:
         info->uses_bindless_images = true;
         info->num_memory_stores++;
         break;
      case nir_intrinsic_image_deref_store:
         info->num_memory_stores++;
         break;
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
         info->uses_bindless_images = true;
         info->num_memory_stores++;
         break;
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_comp_swap:
      case nir_intrinsic_image_deref_atomic_inc_wrap:
      case nir_intrinsic_image_deref_atomic_dec_wrap:
         info->num_memory_stores++;
         break;
      case nir_intrinsic_store_ssbo:
      case nir_intrinsic_ssbo_atomic_add:
      case nir_intrinsic_ssbo_atomic_imin:
      case nir_intrinsic_ssbo_atomic_umin:
      case nir_intrinsic_ssbo_atomic_imax:
      case nir_intrinsic_ssbo_atomic_umax:
      case nir_intrinsic_ssbo_atomic_and:
      case nir_intrinsic_ssbo_atomic_or:
      case nir_intrinsic_ssbo_atomic_xor:
      case nir_intrinsic_ssbo_atomic_exchange:
      case nir_intrinsic_ssbo_atomic_comp_swap:
         info->num_memory_stores++;
         break;
      case nir_intrinsic_load_color0:
      case nir_intrinsic_load_color1: {
         unsigned index = intr->intrinsic == nir_intrinsic_load_color1;
         uint8_t mask = nir_ssa_def_components_read(&intr->dest.ssa);
         info->colors_read |= mask << (index * 4);
         break;
      }
      case nir_intrinsic_load_barycentric_pixel:
      case nir_intrinsic_load_barycentric_centroid:
      case nir_intrinsic_load_barycentric_sample:
      case nir_intrinsic_load_barycentric_at_offset:  /* uses center */
      case nir_intrinsic_load_barycentric_at_sample: { /* uses center */
         unsigned mode = nir_intrinsic_interp_mode(intr);

         if (mode == INTERP_MODE_FLAT)
            break;

         if (mode == INTERP_MODE_NOPERSPECTIVE) {
            if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
               info->uses_linear_sample = true;
            else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
               info->uses_linear_centroid = true;
            else
               info->uses_linear_center = true;
         } else {
            if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
               info->uses_persp_sample = true;
            else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
               info->uses_persp_centroid = true;
            else
               info->uses_persp_center = true;
         }
         if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
            info->uses_interp_at_sample = true;
         break;
      }
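      /* The persp/linear x center/centroid/sample flags record which of the
       * fixed-function barycentric sets the shader needs; the driver is
       * expected to enable only those in the PS input configuration. */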
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_per_vertex_input:
      case nir_intrinsic_load_input_vertex:
      case nir_intrinsic_load_interpolated_input:
         scan_io_usage(info, intr, true);
         break;
      case nir_intrinsic_load_output:
      case nir_intrinsic_load_per_vertex_output:
      case nir_intrinsic_store_output:
      case nir_intrinsic_store_per_vertex_output:
         scan_io_usage(info, intr, false);
         break;
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         unreachable("these opcodes should have been lowered");
         break;
      default:
         break;
      }
   }
}

void si_nir_scan_shader(const struct nir_shader *nir, struct si_shader_info *info)
{
   nir_function *func;

   info->base = nir->info;
   info->stage = nir->info.stage;

   if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      if (info->base.tess.primitive_mode == GL_ISOLINES)
         info->base.tess.primitive_mode = GL_LINES;
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      /* post_depth_coverage implies early_fragment_tests */
      info->base.fs.early_fragment_tests |= info->base.fs.post_depth_coverage;

      info->color_interpolate[0] = nir->info.fs.color0_interp;
      info->color_interpolate[1] = nir->info.fs.color1_interp;
      for (unsigned i = 0; i < 2; i++) {
         if (info->color_interpolate[i] == INTERP_MODE_NONE)
            info->color_interpolate[i] = INTERP_MODE_COLOR;
      }

      info->color_interpolate_loc[0] = nir->info.fs.color0_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
                                       nir->info.fs.color0_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
                                                                      TGSI_INTERPOLATE_LOC_CENTER;
      info->color_interpolate_loc[1] = nir->info.fs.color1_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
                                       nir->info.fs.color1_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
                                                                      TGSI_INTERPOLATE_LOC_CENTER;
   }

   info->constbuf0_num_slots = nir->num_uniforms;

   if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
      info->tessfactors_are_def_in_all_invocs = ac_are_tessfactors_def_in_all_invocs(nir);
   }

   memset(info->output_semantic_to_slot, -1, sizeof(info->output_semantic_to_slot));

   func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
   nir_foreach_block (block, func->impl) {
      nir_foreach_instr (instr, block)
         scan_instruction(nir, info, instr);
   }

   /* Add color inputs to the list of inputs. */
   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      for (unsigned i = 0; i < 2; i++) {
         if ((info->colors_read >> (i * 4)) & 0xf) {
            info->input_semantic[info->num_inputs] = VARYING_SLOT_COL0 + i;
            info->input_interpolate[info->num_inputs] = info->color_interpolate[i];
            info->input_usage_mask[info->num_inputs] = info->colors_read >> (i * 4);
            info->num_inputs++;
         }
      }
   }

   /* Trim output read masks based on write masks. */
   for (unsigned i = 0; i < info->num_outputs; i++)
      info->output_readmask[i] &= info->output_usagemask[i];
}

static void si_nir_opts(struct nir_shader *nir, bool first)
{
   bool progress;

   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_lower_phis_to_scalar);

   do {
      progress = false;
      bool lower_alu_to_scalar = false;
      bool lower_phis_to_scalar = false;

      if (first) {
         bool opt_find_array_copies = false;

         NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
         NIR_PASS(lower_alu_to_scalar, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
         NIR_PASS(opt_find_array_copies, nir, nir_opt_find_array_copies);
         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);

         /* Call nir_lower_var_copies() to remove any copies introduced
          * by nir_opt_find_array_copies().
          */
         if (opt_find_array_copies)
            NIR_PASS(progress, nir, nir_lower_var_copies);
         progress |= opt_find_array_copies;
      } else {
         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
      }

      NIR_PASS(progress, nir, nir_opt_dead_write_vars);

      NIR_PASS(lower_alu_to_scalar, nir, nir_opt_trivial_continues);
      /* (Constant) copy propagation is needed for txf with offsets. */
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(lower_phis_to_scalar, nir, nir_opt_if, true);
      NIR_PASS(progress, nir, nir_opt_dead_cf);

      if (lower_alu_to_scalar)
         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
      if (lower_phis_to_scalar)
         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
      progress |= lower_alu_to_scalar | lower_phis_to_scalar;

      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

      /* Needed for algebraic lowering */
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (!nir->info.flrp_lowered) {
         unsigned lower_flrp = (nir->options->lower_flrp16 ? 16 : 0) |
                               (nir->options->lower_flrp32 ? 32 : 0) |
                               (nir->options->lower_flrp64 ? 64 : 0);
         assert(lower_flrp);
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp, lower_flrp, false /* always_precise */);
         if (lower_flrp_progress) {
            NIR_PASS(progress, nir, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         nir->info.flrp_lowered = true;
      }
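      /* flrp(a, b, c) = a * (1 - c) + b * c; which bit sizes get expanded is
       * controlled by the lower_flrp16/32/64 option bits collected above. */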

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll, 0);
      }
   } while (progress);
}

static int type_size_vec4(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}
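/* Size callback for nir_lower_io below: returns how many vec4 slots a type
 * occupies, e.g. 1 for a vec3 and 4 for a mat4. */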

static void si_nir_lower_color(nir_shader *nir)
{
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
   nir_builder b;

   nir_builder_init(&b, entrypoint);

   nir_foreach_block (block, entrypoint) {
      nir_foreach_instr_safe (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         if (intrin->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         b.cursor = nir_before_instr(instr);
         nir_variable *var = nir_deref_instr_get_variable(deref);
         nir_ssa_def *def;

         if (var->data.location == VARYING_SLOT_COL0) {
            def = nir_load_color0(&b);
            nir->info.fs.color0_interp = var->data.interpolation;
            nir->info.fs.color0_sample = var->data.sample;
            nir->info.fs.color0_centroid = var->data.centroid;
         } else if (var->data.location == VARYING_SLOT_COL1) {
            def = nir_load_color1(&b);
            nir->info.fs.color1_interp = var->data.interpolation;
            nir->info.fs.color1_sample = var->data.sample;
            nir->info.fs.color1_centroid = var->data.centroid;
         } else {
            continue;
         }

         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(def));
         nir_instr_remove(instr);
      }
   }
}
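/* Rewriting COL0/COL1 loads as load_color0/1 intrinsics preserves the color
 * interpolation qualifiers in nir->info.fs; si_nir_scan_shader() later
 * re-adds the colors as inputs based on colors_read. */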

static void si_lower_io(struct nir_shader *nir)
{
   /* HW supports indirect indexing for: | Enabled in driver
    * -------------------------------------------------------
    * VS inputs                          | No
    * TCS inputs                         | Yes
    * TES inputs                         | Yes
    * GS inputs                          | No
    * -------------------------------------------------------
    * VS outputs before TCS              | No
    * VS outputs before GS               | No
    * TCS outputs                        | Yes
    * TES outputs before GS              | No
    */
   bool has_indirect_inputs = nir->info.stage == MESA_SHADER_TESS_CTRL ||
                              nir->info.stage == MESA_SHADER_TESS_EVAL;
   bool has_indirect_outputs = nir->info.stage == MESA_SHADER_TESS_CTRL;

   if (!has_indirect_inputs || !has_indirect_outputs) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir),
                 !has_indirect_outputs, !has_indirect_inputs);

      /* Since we're doing nir_lower_io_to_temporaries late, we need
       * to lower all the copy_deref's introduced by
       * lower_io_to_temporaries before calling nir_lower_io.
       */
      NIR_PASS_V(nir, nir_split_var_copies);
      NIR_PASS_V(nir, nir_lower_var_copies);
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   }

   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      si_nir_lower_color(nir);

   NIR_PASS_V(nir, nir_lower_io, nir_var_shader_out | nir_var_shader_in,
              type_size_vec4, (nir_lower_io_options)0);
   nir->info.io_lowered = true;
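   /* From here on, IO accesses are load_input/store_output-style intrinsics;
    * scan_io_usage() above reads nir_intrinsic_base() as the driver location
    * in vec4 slots, which is why type_size_vec4 is the size callback. */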

   /* This pass needs actual constants */
   NIR_PASS_V(nir, nir_opt_constant_folding);
   NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
   NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_out);

   /* Remove dead derefs, so that nir_validate doesn't fail. */
   NIR_PASS_V(nir, nir_opt_dce);

   /* Remove input and output nir_variables, because we don't need them
    * anymore. Also remove uniforms, because those should have been lowered
    * to UBOs already.
    */
   unsigned modes = nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
   nir_foreach_variable_with_modes_safe(var, nir, modes) {
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
}
687 * Perform "lowering" operations on the NIR that are run once when the shader
688 * selector is created.
690 static void si_lower_nir(struct si_screen
*sscreen
, struct nir_shader
*nir
)
   /* Perform lowerings (and optimizations) of code.
    *
    * Performance considerations aside, we must:
    * - lower certain ALU operations
    * - ensure constant offsets for texture instructions are folded
    *   and copy-propagated
    */

   static const struct nir_lower_tex_options lower_tex_options = {
      .lower_txp = ~0u,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = 64,
      .ballot_bit_size = 64,
      .lower_to_scalar = true,
      .lower_subgroup_masks = true,
      .lower_vote_trivial = false,
      .lower_vote_eq_to_ballot = true,
   };
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroups_options);
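   /* The 64-bit ballot size matches the 64-lane wavefronts of the GCN
    * hardware this driver targets. */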

   /* Lower load constants to scalar and then clean up the mess */
   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
   NIR_PASS_V(nir, nir_lower_var_copies);
   NIR_PASS_V(nir, nir_lower_pack);
   NIR_PASS_V(nir, nir_opt_access);
   si_nir_opts(nir, true);

   /* Lower large variables that are always constant with load_constant
    * intrinsics, which get turned into PC-relative loads from a data
    * section next to the shader.
    *
    * st/mesa calls finalize_nir twice, but we can't call this pass twice.
    */
   bool changed = false;
   if (!nir->constant_data) {
      /* The pass crashes if there are dead temps of lowered IO interface types. */
      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
      NIR_PASS(changed, nir, nir_opt_large_constants, glsl_get_natural_size_align_bytes, 16);
   }

   changed |= ac_lower_indirect_derefs(nir, sscreen->info.chip_class);
   if (changed)
      si_nir_opts(nir, false);

   NIR_PASS_V(nir, nir_lower_bool_to_int32);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);

   if (sscreen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
      NIR_PASS_V(nir, nir_lower_discard_to_demote);
}

void si_finalize_nir(struct pipe_screen *screen, void *nirptr, bool optimize)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct nir_shader *nir = (struct nir_shader *)nirptr;

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   si_lower_io(nir);
   si_lower_nir(sscreen, nir);
}