/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_shader_internal.h"

#include "ac_nir_to_llvm.h"

#include "tgsi/tgsi_from_mesa.h"

#include "compiler/nir/nir.h"
#include "compiler/nir_types.h"
static void scan_instruction(struct tgsi_shader_info *info,
			     nir_instr *instr)
{
	if (instr->type == nir_instr_type_alu) {
		nir_alu_instr *alu = nir_instr_as_alu(instr);

		switch (alu->op) {
		case nir_op_fddx:
		case nir_op_fddy:
		case nir_op_fddx_fine:
		case nir_op_fddy_fine:
		case nir_op_fddx_coarse:
		case nir_op_fddy_coarse:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_tex) {
		nir_tex_instr *tex = nir_instr_as_tex(instr);

		info->samplers_declared |=
			u_bit_consecutive(tex->sampler_index, 1);

		if (tex->texture->var->data.bindless)
			info->uses_bindless_samplers = true;

		switch (tex->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_intrinsic) {
		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		switch (intr->intrinsic) {
		case nir_intrinsic_load_front_face:
			info->uses_frontface = 1;
			break;
		case nir_intrinsic_load_instance_id:
			info->uses_instanceid = 1;
			break;
		case nir_intrinsic_load_invocation_id:
			info->uses_invocationid = true;
			break;
		case nir_intrinsic_load_num_work_groups:
			info->uses_grid_size = true;
			break;
		case nir_intrinsic_load_local_group_size:
			/* The block size is translated to IMM with a fixed block size. */
			if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
				info->uses_block_size = true;
			break;
		case nir_intrinsic_load_local_invocation_id:
		case nir_intrinsic_load_work_group_id: {
			unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
			while (mask) {
				unsigned i = u_bit_scan(&mask);

				if (intr->intrinsic == nir_intrinsic_load_work_group_id)
					info->uses_block_id[i] = true;
				else
					info->uses_thread_id[i] = true;
			}
			break;
		}
		case nir_intrinsic_load_vertex_id:
			info->uses_vertexid = 1;
			break;
		case nir_intrinsic_load_vertex_id_zero_base:
			info->uses_vertexid_nobase = 1;
			break;
		case nir_intrinsic_load_base_vertex:
			info->uses_basevertex = 1;
			break;
		case nir_intrinsic_load_primitive_id:
			info->uses_primid = 1;
			break;
		case nir_intrinsic_load_sample_mask_in:
			info->reads_samplemask = true;
			break;
		case nir_intrinsic_load_tess_level_inner:
		case nir_intrinsic_load_tess_level_outer:
			info->reads_tess_factors = true;
			break;
		case nir_intrinsic_image_var_store:
		case nir_intrinsic_image_var_atomic_add:
		case nir_intrinsic_image_var_atomic_min:
		case nir_intrinsic_image_var_atomic_max:
		case nir_intrinsic_image_var_atomic_and:
		case nir_intrinsic_image_var_atomic_or:
		case nir_intrinsic_image_var_atomic_xor:
		case nir_intrinsic_image_var_atomic_exchange:
		case nir_intrinsic_image_var_atomic_comp_swap:
		case nir_intrinsic_store_ssbo:
		case nir_intrinsic_ssbo_atomic_add:
		case nir_intrinsic_ssbo_atomic_imin:
		case nir_intrinsic_ssbo_atomic_umin:
		case nir_intrinsic_ssbo_atomic_imax:
		case nir_intrinsic_ssbo_atomic_umax:
		case nir_intrinsic_ssbo_atomic_and:
		case nir_intrinsic_ssbo_atomic_or:
		case nir_intrinsic_ssbo_atomic_xor:
		case nir_intrinsic_ssbo_atomic_exchange:
		case nir_intrinsic_ssbo_atomic_comp_swap:
			info->writes_memory = true;
			break;
		case nir_intrinsic_load_var: {
			nir_variable *var = intr->variables[0]->var;
			nir_variable_mode mode = var->data.mode;
			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(var->type));

			if (mode == nir_var_shader_in) {
				switch (var->data.interpolation) {
				case INTERP_MODE_NONE:
					if (glsl_base_type_is_integer(base_type))
						break;

					/* fall-through */
				case INTERP_MODE_SMOOTH:
					if (var->data.sample)
						info->uses_persp_sample = true;
					else if (var->data.centroid)
						info->uses_persp_centroid = true;
					else
						info->uses_persp_center = true;
					break;
				case INTERP_MODE_NOPERSPECTIVE:
					if (var->data.sample)
						info->uses_linear_sample = true;
					else if (var->data.centroid)
						info->uses_linear_centroid = true;
					else
						info->uses_linear_center = true;
					break;
				}
			}
			break;
		}
		case nir_intrinsic_interp_var_at_centroid:
		case nir_intrinsic_interp_var_at_sample:
		case nir_intrinsic_interp_var_at_offset: {
			enum glsl_interp_mode interp =
				intr->variables[0]->var->data.interpolation;

			switch (interp) {
			case INTERP_MODE_SMOOTH:
			case INTERP_MODE_NONE:
				if (intr->intrinsic == nir_intrinsic_interp_var_at_centroid)
					info->uses_persp_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_var_at_sample)
					info->uses_persp_opcode_interp_sample = true;
				else
					info->uses_persp_opcode_interp_offset = true;
				break;
			case INTERP_MODE_NOPERSPECTIVE:
				if (intr->intrinsic == nir_intrinsic_interp_var_at_centroid)
					info->uses_linear_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_var_at_sample)
					info->uses_linear_opcode_interp_sample = true;
				else
					info->uses_linear_opcode_interp_offset = true;
				break;
			case INTERP_MODE_FLAT:
				break;
			default:
				unreachable("Unsupported interpolation type");
			}
			break;
		}
		default:
			break;
		}
	}
}

void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
			   const struct tgsi_shader_info *info,
			   struct tgsi_tessctrl_info *out)
{
	memset(out, 0, sizeof(*out));

	if (nir->info.stage != MESA_SHADER_TESS_CTRL)
		return;

	/* Initial value = true. Here the pass will accumulate results from
	 * multiple segments surrounded by barriers. If tess factors aren't
	 * written at all, it's a shader bug and we don't care if this will be
	 * true.
	 */
	out->tessfactors_are_def_in_all_invocs = true;

	/* TODO: Implement scanning of tess factors, see tgsi backend. */
}

void si_nir_scan_shader(const struct nir_shader *nir,
			struct tgsi_shader_info *info)
{
	nir_function *func;
	unsigned i;

	info->processor = pipe_shader_type_from_mesa(nir->info.stage);
	info->num_tokens = 2; /* indicate that the shader is non-empty */
	info->num_instructions = 2;

	info->properties[TGSI_PROPERTY_NEXT_SHADER] =
		pipe_shader_type_from_mesa(nir->info.next_stage);

	if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
		info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
			nir->info.tess.tcs_vertices_out;
	}

	if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		if (nir->info.tess.primitive_mode == GL_ISOLINES)
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
		else
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;

		STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_ODD);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_EVEN);

		info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
		info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
		info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
		info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
		info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
		info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
	}

	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
			nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
		info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;

		if (nir->info.fs.pixel_center_integer) {
			info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
				TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
		}

		if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
			switch (nir->info.fs.depth_layout) {
			case FRAG_DEPTH_LAYOUT_ANY:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
				break;
			case FRAG_DEPTH_LAYOUT_GREATER:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
				break;
			case FRAG_DEPTH_LAYOUT_LESS:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
				break;
			case FRAG_DEPTH_LAYOUT_UNCHANGED:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
				break;
			default:
				unreachable("Unknown depth layout");
			}
		}
	}

	if (nir->info.stage == MESA_SHADER_COMPUTE) {
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
	}

	uint64_t processed_inputs = 0;
	unsigned num_inputs = 0;
	nir_foreach_variable(variable, &nir->inputs) {
		unsigned semantic_name, semantic_index;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type,
								   nir->info.stage == MESA_SHADER_VERTEX);

		i = variable->data.driver_location;

		/* Vertex shader inputs don't have semantics. The state
		 * tracker has already mapped them to attributes via
		 * variable->data.driver_location.
		 */
		if (nir->info.stage == MESA_SHADER_VERTEX) {
			/* TODO: gather the actual input usage and remove this. */
			info->input_usage_mask[i] = TGSI_WRITEMASK_XYZW;

			if (glsl_type_is_dual_slot(variable->type)) {
				num_inputs += 2;

				/* TODO: gather the actual input usage and remove this. */
				info->input_usage_mask[i+1] = TGSI_WRITEMASK_XYZW;
			} else
				num_inputs++;
			continue;
		}

		/* Fragment shader position is a system value. */
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    variable->data.location == VARYING_SLOT_POS) {

			if (variable->data.pixel_center_integer)
				info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
					TGSI_FS_COORD_PIXEL_CENTER_INTEGER;

			num_inputs++;
			continue;
		}

		for (unsigned j = 0; j < attrib_count; j++, i++) {

			if (processed_inputs & ((uint64_t)1 << i))
				continue;

			processed_inputs |= ((uint64_t)1 << i);
			num_inputs++;

			tgsi_get_gl_varying_semantic(variable->data.location + j, true,
						     &semantic_name, &semantic_index);

			info->input_semantic_name[i] = semantic_name;
			info->input_semantic_index[i] = semantic_index;

			if (semantic_name == TGSI_SEMANTIC_PRIMID)
				info->uses_primid = true;

			if (variable->data.sample)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
			else if (variable->data.centroid)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
			else
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;

			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(variable->type));

			switch (variable->data.interpolation) {
			case INTERP_MODE_NONE:
				if (glsl_base_type_is_integer(base_type)) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
					break;
				}

				if (semantic_name == TGSI_SEMANTIC_COLOR) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
					break;
				}
				/* fall-through */

			case INTERP_MODE_SMOOTH:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
				break;

			case INTERP_MODE_NOPERSPECTIVE:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
				break;

			case INTERP_MODE_FLAT:
				info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
				break;
			}

			/* TODO make this more precise */
			if (variable->data.location == VARYING_SLOT_COL0)
				info->colors_read |= 0x0f;
			else if (variable->data.location == VARYING_SLOT_COL1)
				info->colors_read |= 0xf0;
		}
	}

	info->num_inputs = num_inputs;

	uint64_t processed_outputs = 0;
	unsigned num_outputs = 0;
	nir_foreach_variable(variable, &nir->outputs) {
		unsigned semantic_name, semantic_index;

		i = variable->data.driver_location;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		for (unsigned k = 0; k < attrib_count; k++, i++) {

			if (nir->info.stage == MESA_SHADER_FRAGMENT) {
				tgsi_get_gl_frag_result_semantic(variable->data.location + k,
					&semantic_name, &semantic_index);

				/* Adjust for dual source blending */
				if (variable->data.index > 0) {
					semantic_index++;
				}
			} else {
				tgsi_get_gl_varying_semantic(variable->data.location + k, true,
							     &semantic_name, &semantic_index);
			}

			unsigned num_components = 4;
			unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
			if (vector_elements)
				num_components = vector_elements;

			unsigned component = variable->data.location_frac;
			if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
				if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
					num_components = (num_components * 2) - 4;
					component = 0;
				} else {
					num_components = MIN2(num_components * 2, 4);
				}
			}

			ubyte usagemask = 0;
			for (unsigned j = component; j < num_components + component; j++) {
				switch (j) {
				case 0:
					usagemask |= TGSI_WRITEMASK_X;
					break;
				case 1:
					usagemask |= TGSI_WRITEMASK_Y;
					break;
				case 2:
					usagemask |= TGSI_WRITEMASK_Z;
					break;
				case 3:
					usagemask |= TGSI_WRITEMASK_W;
					break;
				default:
					unreachable("error calculating component index");
				}
			}

			unsigned gs_out_streams;
			if (variable->data.stream & (1u << 31)) {
				gs_out_streams = variable->data.stream & ~(1u << 31);
			} else {
				assert(variable->data.stream < 4);
				gs_out_streams = 0;
				for (unsigned j = 0; j < num_components; ++j)
					gs_out_streams |= variable->data.stream << (2 * (component + j));
			}
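
			/* Note: gs_out_streams holds one 2-bit stream index per
			 * component (X in bits 1:0, Y in 3:2, Z in 5:4, W in 7:6),
			 * either taken directly from variable->data.stream when bit 31
			 * flags the packed form, or replicated from the single stream
			 * index above.
			 */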
			unsigned streamx = gs_out_streams & 3;
			unsigned streamy = (gs_out_streams >> 2) & 3;
			unsigned streamz = (gs_out_streams >> 4) & 3;
			unsigned streamw = (gs_out_streams >> 6) & 3;

			if (usagemask & TGSI_WRITEMASK_X) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_X;
				info->output_streams[i] |= streamx;
				info->num_stream_output_components[streamx]++;
			}
			if (usagemask & TGSI_WRITEMASK_Y) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Y;
				info->output_streams[i] |= streamy << 2;
				info->num_stream_output_components[streamy]++;
			}
			if (usagemask & TGSI_WRITEMASK_Z) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_Z;
				info->output_streams[i] |= streamz << 4;
				info->num_stream_output_components[streamz]++;
			}
			if (usagemask & TGSI_WRITEMASK_W) {
				info->output_usagemask[i] |= TGSI_WRITEMASK_W;
				info->output_streams[i] |= streamw << 6;
				info->num_stream_output_components[streamw]++;
			}

			/* make sure we only count this location once against
			 * the num_outputs counter.
			 */
			if (processed_outputs & ((uint64_t)1 << i))
				continue;

			processed_outputs |= ((uint64_t)1 << i);
			num_outputs++;

			info->output_semantic_name[i] = semantic_name;
			info->output_semantic_index[i] = semantic_index;

			switch (semantic_name) {
			case TGSI_SEMANTIC_PRIMID:
				info->writes_primid = true;
				break;
			case TGSI_SEMANTIC_VIEWPORT_INDEX:
				info->writes_viewport_index = true;
				break;
			case TGSI_SEMANTIC_LAYER:
				info->writes_layer = true;
				break;
			case TGSI_SEMANTIC_PSIZE:
				info->writes_psize = true;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				info->writes_clipvertex = true;
				break;
			case TGSI_SEMANTIC_COLOR:
				info->colors_written |= 1 << semantic_index;
				break;
			case TGSI_SEMANTIC_STENCIL:
				info->writes_stencil = true;
				break;
			case TGSI_SEMANTIC_SAMPLEMASK:
				info->writes_samplemask = true;
				break;
			case TGSI_SEMANTIC_EDGEFLAG:
				info->writes_edgeflag = true;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (info->processor == PIPE_SHADER_FRAGMENT)
					info->writes_z = true;
				else
					info->writes_position = true;
				break;
			}

			if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
				switch (semantic_name) {
				case TGSI_SEMANTIC_PATCH:
					info->reads_perpatch_outputs = true;
					break;
				case TGSI_SEMANTIC_TESSINNER:
				case TGSI_SEMANTIC_TESSOUTER:
					info->reads_tessfactor_outputs = true;
					break;
				default:
					info->reads_pervertex_outputs = true;
				}
			}
		}

		unsigned loc = variable->data.location;
		if (loc == FRAG_RESULT_COLOR &&
		    nir->info.outputs_written & (1ull << loc)) {
			assert(attrib_count == 1);
			info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
		}
	}

	info->num_outputs = num_outputs;

	struct set *ubo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
					       _mesa_key_pointer_equal);

	/* Initialise const_file_max[0]. */
	info->const_file_max[0] = -1;

	unsigned ubo_idx = 1;
	nir_foreach_variable(variable, &nir->uniforms) {
		const struct glsl_type *type = variable->type;
		enum glsl_base_type base_type =
			glsl_get_base_type(glsl_without_array(type));
		unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));

		/* Gather the declared-buffers bitmasks. Note: radeonsi doesn't
		 * really use the mask (other than ubo_idx == 1 for regular
		 * uniforms); it's really only used for getting the buffer count,
		 * so we don't need to worry about the ordering.
		 */
		if (variable->interface_type != NULL) {
			if (variable->data.mode == nir_var_uniform) {

				unsigned block_count;
				if (base_type != GLSL_TYPE_INTERFACE) {
					struct set_entry *entry =
						_mesa_set_search(ubo_set, variable->interface_type);

					/* Check if we have already processed
					 * a member from this ubo.
					 */
					if (entry)
						continue;

					block_count = 1;
				} else {
					block_count = aoa_size;
				}

				info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
				ubo_idx += block_count;

				_mesa_set_add(ubo_set, variable->interface_type);
			}

			if (variable->data.mode == nir_var_shader_storage) {
				/* TODO: make this more accurate */
				info->shader_buffers_declared =
					u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
			}

			continue;
		}

		/* We rely on the fact that nir_lower_samplers_as_deref has
		 * eliminated struct dereferences.
		 */
		if (base_type == GLSL_TYPE_SAMPLER) {
			info->samplers_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);

			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] +=
					glsl_count_attribute_slots(type, false);
			}
		} else if (base_type == GLSL_TYPE_IMAGE) {
			info->images_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);

			if (variable->data.bindless) {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] +=
					glsl_count_attribute_slots(type, false);
			}
		} else if (base_type != GLSL_TYPE_ATOMIC_UINT) {
			if (strncmp(variable->name, "state.", 6) == 0 ||
			    strncmp(variable->name, "gl_", 3) == 0) {
				/* FIXME: figure out why piglit tests with builtin
				 * uniforms are failing without this.
				 */
				info->const_buffers_declared =
					u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
			} else {
				info->const_buffers_declared |= 1;
				info->const_file_max[0] +=
					glsl_count_attribute_slots(type, false);
			}
		}
	}

	_mesa_set_destroy(ubo_set, NULL);

	info->num_written_clipdistance = nir->info.clip_distance_array_size;
	info->num_written_culldistance = nir->info.cull_distance_array_size;
	info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
	info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

	if (info->processor == PIPE_SHADER_FRAGMENT)
		info->uses_kill = nir->info.fs.uses_discard;

	func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
	nir_foreach_block(block, func->impl) {
		nir_foreach_instr(instr, block)
			scan_instruction(info, instr);
	}
}

/**
 * Perform "lowering" operations on the NIR that are run once when the shader
 * selector is created.
 */
void
si_lower_nir(struct si_shader_selector *sel)
{
	/* Disable const buffer fast path for old LLVM versions */
	if (sel->screen->info.chip_class == SI && HAVE_LLVM < 0x0600 &&
	    sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		sel->info.const_buffers_declared |= 0x2;
	}
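
	/* Note (assumption): declaring a second, unused constant buffer should
	 * be enough to keep the single-const-buffer fast path from being taken
	 * later, since the guard above only fires when exactly one constant
	 * buffer and no shader buffers are declared.
	 */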

	/* Adjust the driver location of inputs and outputs. The state tracker
	 * interprets them as slots, while the ac/nir backend interprets them
	 * as individual components.
	 */
	nir_foreach_variable(variable, &sel->nir->inputs)
		variable->data.driver_location *= 4;

	nir_foreach_variable(variable, &sel->nir->outputs) {
		variable->data.driver_location *= 4;

		if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
			if (variable->data.location == FRAG_RESULT_DEPTH)
				variable->data.driver_location += 2;
			else if (variable->data.location == FRAG_RESULT_STENCIL)
				variable->data.driver_location += 1;
		}
	}

	/* Perform lowerings (and optimizations) of code.
	 *
	 * Performance considerations aside, we must:
	 * - lower certain ALU operations
	 * - ensure constant offsets for texture instructions are folded
	 *   and copy-propagated
	 */
	NIR_PASS_V(sel->nir, nir_lower_returns);
	NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
	NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
	NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);

	static const struct nir_lower_tex_options lower_tex_options = {
		.lower_txp = ~0u,
	};
	NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);

	const nir_lower_subgroups_options subgroups_options = {
		.subgroup_size = 64,
		.ballot_bit_size = 64,
		.lower_to_scalar = true,
		.lower_subgroup_masks = true,
		.lower_vote_trivial = false,
		.lower_vote_eq_to_ballot = true,
	};
	NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);

	ac_lower_indirect_derefs(sel->nir, sel->screen->info.chip_class);

	bool progress;
	do {
		progress = false;

		/* (Constant) copy propagation is needed for txf with offsets. */
		NIR_PASS(progress, sel->nir, nir_copy_prop);
		NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
		NIR_PASS(progress, sel->nir, nir_opt_dce);
		if (nir_opt_trivial_continues(sel->nir)) {
			progress = true;
			NIR_PASS(progress, sel->nir, nir_copy_prop);
			NIR_PASS(progress, sel->nir, nir_opt_dce);
		}
		NIR_PASS(progress, sel->nir, nir_opt_if);
		NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
		NIR_PASS(progress, sel->nir, nir_opt_cse);
		NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8);

		/* Needed for algebraic lowering */
		NIR_PASS(progress, sel->nir, nir_opt_algebraic);
		NIR_PASS(progress, sel->nir, nir_opt_constant_folding);

		NIR_PASS(progress, sel->nir, nir_opt_undef);
		NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
		if (sel->nir->options->max_unroll_iterations) {
			NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
		}
	} while (progress);
}

static void declare_nir_input_vs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}

static void declare_nir_input_fs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	unsigned slot = variable->data.location;
	if (slot == VARYING_SLOT_POS) {
		out[0] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT);
		out[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT);
		out[2] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT);
		out[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
				       LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT));
		return;
	}

	si_llvm_load_input_fs(ctx, input_index, out);
}

static LLVMValueRef
si_nir_lookup_interp_param(struct ac_shader_abi *abi,
			   enum glsl_interp_mode interp, unsigned location)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	int interp_param_idx = -1;

	switch (interp) {
	case INTERP_MODE_FLAT:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_PERSP_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_PERSP_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_PERSP_SAMPLE;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_LINEAR_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_LINEAR_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_LINEAR_SAMPLE;
		break;
	default:
		assert(!"Unhandled interpolation mode.");
		return NULL;
	}

	return interp_param_idx != -1 ?
		LLVMGetParam(ctx->main_fn, interp_param_idx) : NULL;
}

static LLVMValueRef
si_nir_load_sampler_desc(struct ac_shader_abi *abi,
			 unsigned descriptor_set, unsigned base_index,
			 unsigned constant_index, LLVMValueRef dynamic_index,
			 enum ac_descriptor_type desc_type, bool image,
			 bool write, bool bindless)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	LLVMValueRef index = dynamic_index;

	assert(!descriptor_set);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildAdd(builder, index,
			     LLVMConstInt(ctx->ac.i32, base_index + constant_index, false),
			     "");

	if (image) {
		assert(desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);
		assert(base_index + constant_index < ctx->num_images);

		if (dynamic_index)
			index = si_llvm_bound_index(ctx, index, ctx->num_images);

		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");

		/* TODO: be smarter about when we use dcc_off */
		return si_load_image_desc(ctx, list, index, desc_type, write);
	}

	assert(base_index + constant_index < ctx->num_samplers);

	if (dynamic_index)
		index = si_llvm_bound_index(ctx, index, ctx->num_samplers);

	index = LLVMBuildAdd(ctx->gallivm.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");

	return si_load_sampler_desc(ctx, list, index, desc_type);
}

static void bitcast_inputs(struct si_shader_context *ctx,
			   LLVMValueRef data[4],
			   unsigned input_idx)
{
	for (unsigned chan = 0; chan < 4; chan++) {
		ctx->inputs[input_idx + chan] =
			LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
	}
}

bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		uint64_t processed_inputs = 0;
		nir_foreach_variable(variable, &nir->inputs) {
			unsigned attrib_count = glsl_count_attribute_slots(variable->type,
									   nir->info.stage == MESA_SHADER_VERTEX);
			unsigned input_idx = variable->data.driver_location;

			LLVMValueRef data[4];
			unsigned loc = variable->data.location;

			for (unsigned i = 0; i < attrib_count; i++) {
				/* Packed components share the same location so skip
				 * them if we have already processed the location.
				 */
				if (processed_inputs & ((uint64_t)1 << (loc + i))) {
					input_idx += 4;
					continue;
				}

				if (nir->info.stage == MESA_SHADER_VERTEX) {
					declare_nir_input_vs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
					if (glsl_type_is_dual_slot(variable->type)) {
						input_idx += 4;
						declare_nir_input_vs(ctx, variable, input_idx / 4, data);
						bitcast_inputs(ctx, data, input_idx);
					}
				} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
					declare_nir_input_fs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
				}

				processed_inputs |= ((uint64_t)1 << (loc + i));
				input_idx += 4;
			}
		}
	}

	ctx->abi.inputs = &ctx->inputs[0];
	ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
	ctx->abi.clamp_shadow_reference = true;

	ctx->num_samplers = util_last_bit(info->samplers_declared);
	ctx->num_images = util_last_bit(info->images_declared);

	if (ctx->shader->selector->local_size) {
		assert(nir->info.stage == MESA_SHADER_COMPUTE);
		si_declare_compute_memory(ctx);
	}
	ac_nir_translate(&ctx->ac, &ctx->abi, nir);

	return true;
}