/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"

#include "ac_nir_to_llvm.h"

#include "tgsi/tgsi_from_mesa.h"

#include "compiler/nir/nir.h"
#include "compiler/nir_types.h"

static nir_variable *tex_get_texture_var(nir_tex_instr *instr)
{
	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
		default:
			break;
		}
	}

	return NULL;
}

static nir_variable *intrinsic_get_var(nir_intrinsic_instr *instr)
{
	return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
}

static void gather_intrinsic_load_deref_info(const nir_shader *nir,
					     const nir_intrinsic_instr *instr,
					     nir_variable *var,
					     struct tgsi_shader_info *info)
{
	assert(var && var->data.mode == nir_var_shader_in);

	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: {
		unsigned i = var->data.driver_location;
		unsigned attrib_count = glsl_count_attribute_slots(var->type, false);

		for (unsigned j = 0; j < attrib_count; j++, i++) {
			if (glsl_type_is_64bit(glsl_without_array(var->type))) {
				/* TODO: set the usage mask more accurately for doubles. */
				info->input_usage_mask[i] = TGSI_WRITEMASK_XYZW;
			} else {
				uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
				info->input_usage_mask[i] |= mask << var->data.location_frac;
			}
		}
		break;
	}
	default:
		break;
	}
}
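
/* Example for gather_intrinsic_load_deref_info(): a non-64-bit input packed at
 * location_frac == 1 whose loads read components .xy (mask 0b0011) records an
 * input_usage_mask of 0b0110, i.e. the Y and Z channels of its vec4 slot. */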

static void scan_instruction(const struct nir_shader *nir,
			     struct tgsi_shader_info *info,
			     nir_instr *instr)
{
	if (instr->type == nir_instr_type_alu) {
		nir_alu_instr *alu = nir_instr_as_alu(instr);

		switch (alu->op) {
		case nir_op_fddx:
		case nir_op_fddy:
		case nir_op_fddx_fine:
		case nir_op_fddy_fine:
		case nir_op_fddx_coarse:
		case nir_op_fddy_coarse:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_tex) {
		nir_tex_instr *tex = nir_instr_as_tex(instr);
		nir_variable *texture = tex_get_texture_var(tex);

		if (!texture) {
			info->samplers_declared |=
				u_bit_consecutive(tex->sampler_index, 1);
		} else {
			if (texture->data.bindless)
				info->uses_bindless_samplers = true;
		}

		switch (tex->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_intrinsic) {
		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		switch (intr->intrinsic) {
		case nir_intrinsic_load_front_face:
			info->uses_frontface = 1;
			break;
		case nir_intrinsic_load_instance_id:
			info->uses_instanceid = 1;
			break;
		case nir_intrinsic_load_invocation_id:
			info->uses_invocationid = true;
			break;
		case nir_intrinsic_load_num_work_groups:
			info->uses_grid_size = true;
			break;
		case nir_intrinsic_load_local_group_size:
			/* The block size is translated to an IMM with a fixed block size. */
			if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
				info->uses_block_size = true;
			break;
		case nir_intrinsic_load_local_invocation_id:
		case nir_intrinsic_load_work_group_id: {
			unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
			while (mask) {
				unsigned i = u_bit_scan(&mask);

				if (intr->intrinsic == nir_intrinsic_load_work_group_id)
					info->uses_block_id[i] = true;
				else
					info->uses_thread_id[i] = true;
			}
			break;
		}
		case nir_intrinsic_load_vertex_id:
			info->uses_vertexid = 1;
			break;
		case nir_intrinsic_load_vertex_id_zero_base:
			info->uses_vertexid_nobase = 1;
			break;
		case nir_intrinsic_load_base_vertex:
			info->uses_basevertex = 1;
			break;
		case nir_intrinsic_load_primitive_id:
			info->uses_primid = 1;
			break;
		case nir_intrinsic_load_sample_mask_in:
			info->reads_samplemask = true;
			break;
		case nir_intrinsic_load_tess_level_inner:
		case nir_intrinsic_load_tess_level_outer:
			info->reads_tess_factors = true;
			break;
		case nir_intrinsic_image_deref_load: {
			nir_variable *var = intrinsic_get_var(intr);
			if (var->data.bindless) {
				info->uses_bindless_images = true;

				if (glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_BUF)
					info->uses_bindless_buffer_load = true;
				else
					info->uses_bindless_image_load = true;
			}
			break;
		}
		case nir_intrinsic_image_deref_size:
		case nir_intrinsic_image_deref_samples: {
			nir_variable *var = intrinsic_get_var(intr);
			if (var->data.bindless)
				info->uses_bindless_images = true;
			break;
		}
		case nir_intrinsic_image_deref_store: {
			const nir_deref_instr *image_deref =
				nir_instr_as_deref(intr->src[0].ssa->parent_instr);
			nir_variable *var = intrinsic_get_var(intr);
			if (var->data.bindless) {
				info->uses_bindless_images = true;

				if (glsl_get_sampler_dim(image_deref->type) == GLSL_SAMPLER_DIM_BUF)
					info->uses_bindless_buffer_store = true;
				else
					info->uses_bindless_image_store = true;
			}
			info->writes_memory = true;
			break;
		}
		case nir_intrinsic_image_deref_atomic_add:
		case nir_intrinsic_image_deref_atomic_min:
		case nir_intrinsic_image_deref_atomic_max:
		case nir_intrinsic_image_deref_atomic_and:
		case nir_intrinsic_image_deref_atomic_or:
		case nir_intrinsic_image_deref_atomic_xor:
		case nir_intrinsic_image_deref_atomic_exchange:
		case nir_intrinsic_image_deref_atomic_comp_swap: {
			nir_variable *var = intrinsic_get_var(intr);
			if (var->data.bindless) {
				info->uses_bindless_images = true;

				if (glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_BUF)
					info->uses_bindless_buffer_atomic = true;
				else
					info->uses_bindless_image_atomic = true;
			}
			info->writes_memory = true;
			break;
		}
		case nir_intrinsic_store_ssbo:
		case nir_intrinsic_ssbo_atomic_add:
		case nir_intrinsic_ssbo_atomic_imin:
		case nir_intrinsic_ssbo_atomic_umin:
		case nir_intrinsic_ssbo_atomic_imax:
		case nir_intrinsic_ssbo_atomic_umax:
		case nir_intrinsic_ssbo_atomic_and:
		case nir_intrinsic_ssbo_atomic_or:
		case nir_intrinsic_ssbo_atomic_xor:
		case nir_intrinsic_ssbo_atomic_exchange:
		case nir_intrinsic_ssbo_atomic_comp_swap:
			info->writes_memory = true;
			break;
		case nir_intrinsic_load_deref: {
			nir_variable *var = intrinsic_get_var(intr);
			nir_variable_mode mode = var->data.mode;
			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(var->type));

			if (mode == nir_var_shader_in) {
				gather_intrinsic_load_deref_info(nir, intr, var, info);

				switch (var->data.interpolation) {
				case INTERP_MODE_NONE:
					if (glsl_base_type_is_integer(base_type))
						break;

					/* fall-through */
				case INTERP_MODE_SMOOTH:
					if (var->data.sample)
						info->uses_persp_sample = true;
					else if (var->data.centroid)
						info->uses_persp_centroid = true;
					else
						info->uses_persp_center = true;
					break;
				case INTERP_MODE_NOPERSPECTIVE:
					if (var->data.sample)
						info->uses_linear_sample = true;
					else if (var->data.centroid)
						info->uses_linear_centroid = true;
					else
						info->uses_linear_center = true;
					break;
				default:
					break;
				}
			}
			break;
		}
		case nir_intrinsic_interp_deref_at_centroid:
		case nir_intrinsic_interp_deref_at_sample:
		case nir_intrinsic_interp_deref_at_offset: {
			enum glsl_interp_mode interp = intrinsic_get_var(intr)->data.interpolation;
			switch (interp) {
			case INTERP_MODE_SMOOTH:
			case INTERP_MODE_NONE:
				if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
					info->uses_persp_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
					info->uses_persp_opcode_interp_sample = true;
				else
					info->uses_persp_opcode_interp_offset = true;
				break;
			case INTERP_MODE_NOPERSPECTIVE:
				if (intr->intrinsic == nir_intrinsic_interp_deref_at_centroid)
					info->uses_linear_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
					info->uses_linear_opcode_interp_sample = true;
				else
					info->uses_linear_opcode_interp_offset = true;
				break;
			case INTERP_MODE_FLAT:
				break;
			default:
				unreachable("Unsupported interpolation type");
			}
			break;
		}
		default:
			break;
		}
	}
}

void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
			   struct tgsi_tessctrl_info *out)
{
	memset(out, 0, sizeof(*out));

	if (nir->info.stage != MESA_SHADER_TESS_CTRL)
		return;

	out->tessfactors_are_def_in_all_invocs =
		ac_are_tessfactors_def_in_all_invocs(nir);
}

void si_nir_scan_shader(const struct nir_shader *nir,
			struct tgsi_shader_info *info)
{
	nir_function *func;
	unsigned i;

	info->processor = pipe_shader_type_from_mesa(nir->info.stage);
	info->num_tokens = 2; /* indicate that the shader is non-empty */
	info->num_instructions = 2;

	info->properties[TGSI_PROPERTY_NEXT_SHADER] =
		pipe_shader_type_from_mesa(nir->info.next_stage);

	if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
		info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
			nir->info.tess.tcs_vertices_out;
	}

	if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		if (nir->info.tess.primitive_mode == GL_ISOLINES)
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
		else
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;

		STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_ODD);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_EVEN);
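
		/* The (x + 1) % 3 trick below relies on the enum layouts that the
		 * STATIC_ASSERTs above pin down:
		 *   TESS_SPACING_EQUAL (1)           -> PIPE_TESS_SPACING_EQUAL (2)
		 *   TESS_SPACING_FRACTIONAL_ODD (2)  -> PIPE_TESS_SPACING_FRACTIONAL_ODD (0)
		 *   TESS_SPACING_FRACTIONAL_EVEN (3) -> PIPE_TESS_SPACING_FRACTIONAL_EVEN (1)
		 */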
		info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
		info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
		info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
		info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
		info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
		info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
	}

	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
			nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
		info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;

		if (nir->info.fs.pixel_center_integer) {
			info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
				TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
		}

		if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
			switch (nir->info.fs.depth_layout) {
			case FRAG_DEPTH_LAYOUT_ANY:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
				break;
			case FRAG_DEPTH_LAYOUT_GREATER:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
				break;
			case FRAG_DEPTH_LAYOUT_LESS:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
				break;
			case FRAG_DEPTH_LAYOUT_UNCHANGED:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
				break;
			default:
				unreachable("Unknown depth layout");
			}
		}
	}

	if (gl_shader_stage_is_compute(nir->info.stage)) {
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
	}
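
	/* Scan declared inputs. processed_inputs is a 64-bit bitmask keyed by
	 * driver_location: packed varyings share a slot, so each slot may only
	 * be counted once against num_inputs (the outputs below use the same
	 * scheme). */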
	uint64_t processed_inputs = 0;
	unsigned num_inputs = 0;
	nir_foreach_variable(variable, &nir->inputs) {
		unsigned semantic_name, semantic_index;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type,
								   nir->info.stage == MESA_SHADER_VERTEX);

		i = variable->data.driver_location;

		/* Vertex shader inputs don't have semantics. The state
		 * tracker has already mapped them to attributes via
		 * variable->data.driver_location.
		 */
		if (nir->info.stage == MESA_SHADER_VERTEX) {
			if (glsl_type_is_dual_slot(glsl_without_array(variable->type)))
				num_inputs += 2;
			else
				num_inputs++;
			continue;
		}

		/* Fragment shader position is a system value. */
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    variable->data.location == VARYING_SLOT_POS) {
			if (variable->data.pixel_center_integer)
				info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
					TGSI_FS_COORD_PIXEL_CENTER_INTEGER;

			num_inputs++;
			continue;
		}

		for (unsigned j = 0; j < attrib_count; j++, i++) {

			if (processed_inputs & ((uint64_t)1 << i))
				continue;

			processed_inputs |= ((uint64_t)1 << i);
			num_inputs++;

			tgsi_get_gl_varying_semantic(variable->data.location + j, true,
						     &semantic_name, &semantic_index);

			info->input_semantic_name[i] = semantic_name;
			info->input_semantic_index[i] = semantic_index;

			if (semantic_name == TGSI_SEMANTIC_PRIMID)
				info->uses_primid = true;

			if (variable->data.sample)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
			else if (variable->data.centroid)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
			else
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;

			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(variable->type));

			switch (variable->data.interpolation) {
			case INTERP_MODE_NONE:
				if (glsl_base_type_is_integer(base_type)) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
					break;
				}

				if (semantic_name == TGSI_SEMANTIC_COLOR) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
					break;
				}
				/* fall-through */

			case INTERP_MODE_SMOOTH:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
				break;

			case INTERP_MODE_NOPERSPECTIVE:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
				break;

			case INTERP_MODE_FLAT:
				info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
				break;
			}

			/* TODO: make this more precise. */
			if (variable->data.location == VARYING_SLOT_COL0)
				info->colors_read |= 0x0f;
			else if (variable->data.location == VARYING_SLOT_COL1)
				info->colors_read |= 0xf0;
		}
	}

	info->num_inputs = num_inputs;

	uint64_t processed_outputs = 0;
	unsigned num_outputs = 0;
	nir_foreach_variable(variable, &nir->outputs) {
		unsigned semantic_name, semantic_index;

		i = variable->data.driver_location;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		for (unsigned k = 0; k < attrib_count; k++, i++) {

			if (nir->info.stage == MESA_SHADER_FRAGMENT) {
				tgsi_get_gl_frag_result_semantic(variable->data.location + k,
					&semantic_name, &semantic_index);

				/* Adjust for dual source blending. */
				if (variable->data.index > 0)
					semantic_index++;
			} else {
				tgsi_get_gl_varying_semantic(variable->data.location + k, true,
							     &semantic_name, &semantic_index);
			}

			unsigned num_components = 4;
			unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
			if (vector_elements)
				num_components = vector_elements;

			unsigned component = variable->data.location_frac;
			if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
				if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
					num_components = (num_components * 2) - 4;
					component = 0;
				} else {
					num_components = MIN2(num_components * 2, 4);
				}
			}

			unsigned usagemask = 0;
			for (unsigned j = component; j < num_components + component; j++) {
				switch (j) {
				case 0:
					usagemask |= TGSI_WRITEMASK_X;
					break;
				case 1:
					usagemask |= TGSI_WRITEMASK_Y;
					break;
				case 2:
					usagemask |= TGSI_WRITEMASK_Z;
					break;
				case 3:
					usagemask |= TGSI_WRITEMASK_W;
					break;
				default:
					unreachable("error calculating component index");
				}
			}
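
			/* Example for the loop above: a vec2 output with
			 * location_frac == 1 gives component == 1 and
			 * num_components == 2, so j walks {1, 2} and usagemask
			 * becomes TGSI_WRITEMASK_Y | TGSI_WRITEMASK_Z. */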

			unsigned gs_out_streams;
			if (variable->data.stream & (1u << 31)) {
				gs_out_streams = variable->data.stream & ~(1u << 31);
			} else {
				assert(variable->data.stream < 4);
				gs_out_streams = 0;
				for (unsigned j = 0; j < num_components; ++j)
					gs_out_streams |= variable->data.stream << (2 * (component + j));
			}

			unsigned streamx = gs_out_streams & 3;
			unsigned streamy = (gs_out_streams >> 2) & 3;
			unsigned streamz = (gs_out_streams >> 4) & 3;
			unsigned streamw = (gs_out_streams >> 6) & 3;
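
			/* gs_out_streams packs one 2-bit stream index per
			 * component: bits [1:0] for X, [3:2] for Y, [5:4] for Z
			 * and [7:6] for W. E.g. a 4-component output on stream 2
			 * packs to 0b10101010 (0xaa). */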

			if (usagemask & TGSI_WRITEMASK_X) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_X;
				info->output_streams[i] |= streamx;
				info->num_stream_output_components[streamx]++;
			}
			if (usagemask & TGSI_WRITEMASK_Y) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
				info->output_streams[i] |= streamy << 2;
				info->num_stream_output_components[streamy]++;
			}
			if (usagemask & TGSI_WRITEMASK_Z) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
				info->output_streams[i] |= streamz << 4;
				info->num_stream_output_components[streamz]++;
			}
			if (usagemask & TGSI_WRITEMASK_W) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_W;
				info->output_streams[i] |= streamw << 6;
				info->num_stream_output_components[streamw]++;
			}

			/* Make sure we only count this location once against
			 * the num_outputs counter.
			 */
			if (processed_outputs & ((uint64_t)1 << i))
				continue;

			processed_outputs |= ((uint64_t)1 << i);
			num_outputs++;

			info->output_semantic_name[i] = semantic_name;
			info->output_semantic_index[i] = semantic_index;

			switch (semantic_name) {
			case TGSI_SEMANTIC_PRIMID:
				info->writes_primid = true;
				break;
			case TGSI_SEMANTIC_VIEWPORT_INDEX:
				info->writes_viewport_index = true;
				break;
			case TGSI_SEMANTIC_LAYER:
				info->writes_layer = true;
				break;
			case TGSI_SEMANTIC_PSIZE:
				info->writes_psize = true;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				info->writes_clipvertex = true;
				break;
			case TGSI_SEMANTIC_COLOR:
				info->colors_written |= 1 << semantic_index;
				break;
			case TGSI_SEMANTIC_STENCIL:
				info->writes_stencil = true;
				break;
			case TGSI_SEMANTIC_SAMPLEMASK:
				info->writes_samplemask = true;
				break;
			case TGSI_SEMANTIC_EDGEFLAG:
				info->writes_edgeflag = true;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (info->processor == PIPE_SHADER_FRAGMENT)
					info->writes_z = true;
				else
					info->writes_position = true;
				break;
			}

			if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
				switch (semantic_name) {
				case TGSI_SEMANTIC_PATCH:
					info->reads_perpatch_outputs = true;
					break;
				case TGSI_SEMANTIC_TESSINNER:
				case TGSI_SEMANTIC_TESSOUTER:
					info->reads_tessfactor_outputs = true;
					break;
				default:
					info->reads_pervertex_outputs = true;
				}
			}
		}

		unsigned loc = variable->data.location;
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    loc == FRAG_RESULT_COLOR &&
		    nir->info.outputs_written & (1ull << loc)) {
			assert(attrib_count == 1);
			info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
		}
	}

	info->num_outputs = num_outputs;

	struct set *ubo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
					       _mesa_key_pointer_equal);

	/* Initialize const_file_max[0]. */
	info->const_file_max[0] = -1;

	unsigned ubo_idx = 1;
	nir_foreach_variable(variable, &nir->uniforms) {
		const struct glsl_type *type = variable->type;
		enum glsl_base_type base_type =
			glsl_get_base_type(glsl_without_array(type));
		unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));
		unsigned loc = variable->data.location;
		int slot_count = glsl_count_attribute_slots(type, false);
		int max_slot = MAX2(info->const_file_max[0], (int) loc) + slot_count;

		/* Gather the declared-buffers bitmasks. Note: radeonsi doesn't
		 * really use the masks (other than ubo_idx == 1 for regular
		 * uniforms); they're really only used to get the buffer count,
		 * so we don't need to worry about the ordering.
		 */
		if (variable->interface_type != NULL) {
			if (variable->data.mode == nir_var_uniform ||
			    variable->data.mode == nir_var_mem_ubo) {

				unsigned block_count;
				if (base_type != GLSL_TYPE_INTERFACE) {
					struct set_entry *entry =
						_mesa_set_search(ubo_set, variable->interface_type);

					/* Check if we have already processed
					 * a member from this UBO.
					 */
					if (entry)
						continue;

					block_count = 1;
				} else {
					block_count = aoa_size;
				}

				info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
				ubo_idx += block_count;

				_mesa_set_add(ubo_set, variable->interface_type);
			}

			if (variable->data.mode == nir_var_mem_ssbo) {
				/* TODO: make this more accurate. */
				info->shader_buffers_declared =
					u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
			}

			continue;
		}

		/* We rely on the fact that nir_lower_samplers_as_deref has
		 * eliminated struct dereferences.
		 */
		if (base_type == GLSL_TYPE_SAMPLER) {
			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			} else {
				info->samplers_declared |=
					u_bit_consecutive(variable->data.binding, aoa_size);
			}
		} else if (base_type == GLSL_TYPE_IMAGE) {
			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			} else {
				info->images_declared |=
					u_bit_consecutive(variable->data.binding, aoa_size);
			}
		} else if (base_type != GLSL_TYPE_ATOMIC_UINT) {
			if (strncmp(variable->name, "state.", 6) == 0 ||
			    strncmp(variable->name, "gl_", 3) == 0) {
				/* FIXME: figure out why piglit tests with builtin
				 * uniforms are failing without this.
				 */
				info->const_buffers_declared =
					u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
			} else {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] = max_slot;
			}
		}
	}

	_mesa_set_destroy(ubo_set, NULL);

	info->num_written_clipdistance = nir->info.clip_distance_array_size;
	info->num_written_culldistance = nir->info.cull_distance_array_size;
	info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
	info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
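
	/* u_bit_consecutive(start, count) builds a mask of count set bits
	 * starting at bit start, so e.g. four written clip distances yield a
	 * clipdist_writemask of 0xf. */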

	if (info->processor == PIPE_SHADER_FRAGMENT)
		info->uses_kill = nir->info.fs.uses_discard;

	func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
	nir_foreach_block(block, func->impl) {
		nir_foreach_instr(instr, block)
			scan_instruction(nir, info, instr);
	}
}

/**
 * Perform "lowering" operations on the NIR that are run once when the shader
 * selector is created.
 */
void
si_lower_nir(struct si_shader_selector *sel)
{
	/* Adjust the driver location of inputs and outputs. The state tracker
	 * interprets them as slots, while the ac/nir backend interprets them
	 * as individual components.
	 */
	nir_foreach_variable(variable, &sel->nir->inputs)
		variable->data.driver_location *= 4;

	nir_foreach_variable(variable, &sel->nir->outputs) {
		variable->data.driver_location *= 4;

		if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
			if (variable->data.location == FRAG_RESULT_DEPTH)
				variable->data.driver_location += 2;
			else if (variable->data.location == FRAG_RESULT_STENCIL)
				variable->data.driver_location += 1;
		}
	}
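
	/* Example of the scaling above: an output at slot 3 now starts at
	 * component 12 (3 * 4). The fragment-shader adjustment then places
	 * FRAG_RESULT_DEPTH on its slot's Z channel (+2) and
	 * FRAG_RESULT_STENCIL on the Y channel (+1). */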

	/* Perform lowerings (and optimizations) of code.
	 *
	 * Performance considerations aside, we must:
	 * - lower certain ALU operations
	 * - ensure constant offsets for texture instructions are folded
	 *   and copy-propagated
	 */
	NIR_PASS_V(sel->nir, nir_lower_returns);
	NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
	NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
	NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);

	static const struct nir_lower_tex_options lower_tex_options = {
		.lower_txp = ~0u,
	};
	NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);

	const nir_lower_subgroups_options subgroups_options = {
		.subgroup_size = 64,
		.ballot_bit_size = 64,
		.lower_to_scalar = true,
		.lower_subgroup_masks = true,
		.lower_vote_trivial = false,
		.lower_vote_eq_to_ballot = true,
	};
	NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);

	ac_lower_indirect_derefs(sel->nir, sel->screen->info.chip_class);

	bool progress;
	do {
		progress = false;

		/* (Constant) copy propagation is needed for txf with offsets. */
		NIR_PASS(progress, sel->nir, nir_copy_prop);
		NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
		NIR_PASS(progress, sel->nir, nir_opt_dce);
		if (nir_opt_trivial_continues(sel->nir)) {
			progress = true;
			NIR_PASS(progress, sel->nir, nir_copy_prop);
			NIR_PASS(progress, sel->nir, nir_opt_dce);
		}
		NIR_PASS(progress, sel->nir, nir_opt_if);
		NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
		NIR_PASS(progress, sel->nir, nir_opt_cse);
		NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8, true, true);

		/* Needed for algebraic lowering. */
		NIR_PASS(progress, sel->nir, nir_opt_algebraic);
		NIR_PASS(progress, sel->nir, nir_opt_constant_folding);

		NIR_PASS(progress, sel->nir, nir_opt_undef);
		NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
		if (sel->nir->options->max_unroll_iterations) {
			NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
		}
	} while (progress);
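
	/* NIR_PASS() or-s each pass's "made progress" result into progress, so
	 * the do/while above reruns the whole pass list until it reaches a
	 * fixed point; NIR_PASS_V() is the variant without a progress flag,
	 * for passes that run exactly once. */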

	NIR_PASS_V(sel->nir, nir_lower_bool_to_int32);
}

static void declare_nir_input_vs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}

static void declare_nir_input_fs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	unsigned slot = variable->data.location;
	if (slot == VARYING_SLOT_POS) {
		out[0] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT);
		out[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT);
		out[2] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT);
		out[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
				       LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT));
		return;
	}

	si_llvm_load_input_fs(ctx, input_index, out);
}

static LLVMValueRef
si_nir_lookup_interp_param(struct ac_shader_abi *abi,
			   enum glsl_interp_mode interp, unsigned location)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	int interp_param_idx = -1;

	switch (interp) {
	case INTERP_MODE_FLAT:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_PERSP_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_PERSP_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_PERSP_SAMPLE;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_LINEAR_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_LINEAR_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_LINEAR_SAMPLE;
		break;
	default:
		assert(!"Unhandled interpolation mode.");
		return NULL;
	}

	return interp_param_idx != -1 ?
		LLVMGetParam(ctx->main_fn, interp_param_idx) : NULL;
}

static LLVMValueRef
si_nir_load_sampler_desc(struct ac_shader_abi *abi,
			 unsigned descriptor_set, unsigned base_index,
			 unsigned constant_index, LLVMValueRef dynamic_index,
			 enum ac_descriptor_type desc_type, bool image,
			 bool write, bool bindless)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned const_index = base_index + constant_index;
	bool dcc_off = write;

	/* TODO: images_store and images_atomic are not set. */
	if (!dynamic_index && image &&
	    (info->images_store | info->images_atomic) & (1 << const_index))
		dcc_off = true;

	assert(!descriptor_set);
	assert(!image || desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);

	if (bindless) {
		LLVMValueRef list =
			LLVMGetParam(ctx->main_fn, ctx->param_bindless_samplers_and_images);

		/* dynamic_index is the bindless handle */
		if (image) {
			/* For simplicity, bindless image descriptors use fixed
			 * 16-dword slots for now.
			 */
			dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
						     LLVMConstInt(ctx->i32, 2, 0), "");

			return si_load_image_desc(ctx, list, dynamic_index, desc_type,
						  dcc_off, true);
		}

		/* Since bindless handle arithmetic can contain an unsigned integer
		 * wraparound and si_load_sampler_desc assumes there isn't any,
		 * use GEP without "inbounds" (inside ac_build_pointer_add)
		 * to prevent incorrect code generation and hangs.
		 */
		dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
					     LLVMConstInt(ctx->i32, 2, 0), "");
		list = ac_build_pointer_add(&ctx->ac, list, dynamic_index);
		return si_load_sampler_desc(ctx, list, ctx->i32_0, desc_type);
	}
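
	/* Note on the bindless path above: the handle is a slot index into the
	 * bindless descriptor list. Each slot is 16 dwords, and the
	 * multiplication by 2 converts it into what is presumably an 8-dword
	 * element index; the exact element size of the list is an assumption
	 * here. */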

	unsigned num_slots = image ? ctx->num_images : ctx->num_samplers;
	assert(const_index < num_slots);

	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, const_index, false);

	if (dynamic_index) {
		index = LLVMBuildAdd(builder, index, dynamic_index, "");

		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_llvm_bound_index(ctx, index, num_slots);
	}

	if (image) {
		index = LLVMBuildSub(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
		return si_load_image_desc(ctx, list, index, desc_type, dcc_off, false);
	}

	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	return si_load_sampler_desc(ctx, list, index, desc_type);
}

static void bitcast_inputs(struct si_shader_context *ctx,
			   LLVMValueRef data[4],
			   unsigned input_idx)
{
	for (unsigned chan = 0; chan < 4; chan++) {
		ctx->inputs[input_idx + chan] =
			LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
	}
}

bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		uint64_t processed_inputs = 0;
		nir_foreach_variable(variable, &nir->inputs) {
			unsigned attrib_count = glsl_count_attribute_slots(variable->type,
									   nir->info.stage == MESA_SHADER_VERTEX);
			unsigned input_idx = variable->data.driver_location;

			LLVMValueRef data[4];
			unsigned loc = variable->data.location;

			if (loc >= VARYING_SLOT_VAR0 && nir->info.stage == MESA_SHADER_FRAGMENT)
				ctx->abi.fs_input_attr_indices[loc - VARYING_SLOT_VAR0] = input_idx / 4;

			for (unsigned i = 0; i < attrib_count; i++) {
				/* Packed components share the same location so skip
				 * them if we have already processed the location.
				 */
				if (processed_inputs & ((uint64_t)1 << (loc + i))) {
					input_idx += 4;
					continue;
				}

				if (nir->info.stage == MESA_SHADER_VERTEX) {
					declare_nir_input_vs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
					if (glsl_type_is_dual_slot(variable->type)) {
						input_idx += 4;
						declare_nir_input_vs(ctx, variable, input_idx / 4, data);
						bitcast_inputs(ctx, data, input_idx);
					}
				} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
					declare_nir_input_fs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
				}

				processed_inputs |= ((uint64_t)1 << (loc + i));
				input_idx += 4;
			}
		}
	}

	ctx->abi.inputs = &ctx->inputs[0];
	ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
	ctx->abi.clamp_shadow_reference = true;

	ctx->num_samplers = util_last_bit(info->samplers_declared);
	ctx->num_images = util_last_bit(info->images_declared);

	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE]) {
		assert(gl_shader_stage_is_compute(nir->info.stage));
		si_declare_compute_memory(ctx);
	}
	ac_nir_translate(&ctx->ac, &ctx->abi, nir);

	return true;
}