/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader.h"
#include "si_shader_internal.h"

#include "ac_nir_to_llvm.h"

#include "tgsi/tgsi_from_mesa.h"

#include "compiler/nir/nir.h"
#include "compiler/nir_types.h"
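
/* Type-size callback for nir_lower_io below: uniforms are measured in vec4
 * attribute slots.
 */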
static int
type_size(const struct glsl_type *type)
{
	return glsl_count_attribute_slots(type, false);
}

static void scan_instruction(struct tgsi_shader_info *info,
			     nir_instr *instr)
{
	if (instr->type == nir_instr_type_alu) {
		nir_alu_instr *alu = nir_instr_as_alu(instr);

		switch (alu->op) {
		case nir_op_fddx:
		case nir_op_fddy:
		case nir_op_fddx_fine:
		case nir_op_fddy_fine:
		case nir_op_fddx_coarse:
		case nir_op_fddy_coarse:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_tex) {
		nir_tex_instr *tex = nir_instr_as_tex(instr);

		if (!tex->texture) {
			info->samplers_declared |=
				u_bit_consecutive(tex->sampler_index, 1);
		}

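		/* tex, txb and lod use implicit derivatives to select a mip
		 * level, so they make the shader a derivative user just like
		 * the explicit fddx/fddy ALU ops above.
		 */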
		switch (tex->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_intrinsic) {
		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		switch (intr->intrinsic) {
		case nir_intrinsic_load_front_face:
			info->uses_frontface = 1;
			break;
		case nir_intrinsic_load_instance_id:
			info->uses_instanceid = 1;
			break;
		case nir_intrinsic_load_invocation_id:
			info->uses_invocationid = true;
			break;
		case nir_intrinsic_load_vertex_id:
			info->uses_vertexid = 1;
			break;
		case nir_intrinsic_load_vertex_id_zero_base:
			info->uses_vertexid_nobase = 1;
			break;
		case nir_intrinsic_load_base_vertex:
			info->uses_basevertex = 1;
			break;
		case nir_intrinsic_load_primitive_id:
			info->uses_primid = 1;
			break;
		case nir_intrinsic_load_sample_mask_in:
			info->reads_samplemask = true;
			break;
		case nir_intrinsic_load_tess_level_inner:
		case nir_intrinsic_load_tess_level_outer:
			info->reads_tess_factors = true;
			break;
		case nir_intrinsic_image_store:
		case nir_intrinsic_image_atomic_add:
		case nir_intrinsic_image_atomic_min:
		case nir_intrinsic_image_atomic_max:
		case nir_intrinsic_image_atomic_and:
		case nir_intrinsic_image_atomic_or:
		case nir_intrinsic_image_atomic_xor:
		case nir_intrinsic_image_atomic_exchange:
		case nir_intrinsic_image_atomic_comp_swap:
		case nir_intrinsic_store_ssbo:
		case nir_intrinsic_ssbo_atomic_add:
		case nir_intrinsic_ssbo_atomic_imin:
		case nir_intrinsic_ssbo_atomic_umin:
		case nir_intrinsic_ssbo_atomic_imax:
		case nir_intrinsic_ssbo_atomic_umax:
		case nir_intrinsic_ssbo_atomic_and:
		case nir_intrinsic_ssbo_atomic_or:
		case nir_intrinsic_ssbo_atomic_xor:
		case nir_intrinsic_ssbo_atomic_exchange:
		case nir_intrinsic_ssbo_atomic_comp_swap:
			info->writes_memory = true;
			break;
		case nir_intrinsic_load_var: {
			nir_variable *var = intr->variables[0]->var;
			nir_variable_mode mode = var->data.mode;
			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(var->type));

			if (mode == nir_var_shader_in) {
				switch (var->data.interpolation) {
				case INTERP_MODE_NONE:
					if (glsl_base_type_is_integer(base_type))
						break;

					/* fall-through */
				case INTERP_MODE_SMOOTH:
					if (var->data.sample)
						info->uses_persp_sample = true;
					else if (var->data.centroid)
						info->uses_persp_centroid = true;
					else
						info->uses_persp_center = true;
					break;

				case INTERP_MODE_NOPERSPECTIVE:
					if (var->data.sample)
						info->uses_linear_sample = true;
					else if (var->data.centroid)
						info->uses_linear_centroid = true;
					else
						info->uses_linear_center = true;
					break;
				}
			}
			break;
		}
		case nir_intrinsic_interp_var_at_centroid:
		case nir_intrinsic_interp_var_at_sample:
		case nir_intrinsic_interp_var_at_offset: {
			enum glsl_interp_mode interp =
				intr->variables[0]->var->data.interpolation;
			switch (interp) {
			case INTERP_MODE_SMOOTH:
			case INTERP_MODE_NONE:
				if (intr->intrinsic == nir_intrinsic_interp_var_at_centroid)
					info->uses_persp_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_var_at_sample)
					info->uses_persp_opcode_interp_sample = true;
				else
					info->uses_persp_opcode_interp_offset = true;
				break;
			case INTERP_MODE_NOPERSPECTIVE:
				if (intr->intrinsic == nir_intrinsic_interp_var_at_centroid)
					info->uses_linear_opcode_interp_centroid = true;
				else if (intr->intrinsic == nir_intrinsic_interp_var_at_sample)
					info->uses_linear_opcode_interp_sample = true;
				else
					info->uses_linear_opcode_interp_offset = true;
				break;
			case INTERP_MODE_FLAT:
				break;
			default:
				unreachable("Unsupported interpolation type");
			}
			break;
		}
		default:
			break;
		}
	}
}

void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
			   const struct tgsi_shader_info *info,
			   struct tgsi_tessctrl_info *out)
{
	memset(out, 0, sizeof(*out));

	if (nir->info.stage != MESA_SHADER_TESS_CTRL)
		return;

	/* Initial value = true. Here the pass will accumulate results from
	 * multiple segments surrounded by barriers. If tess factors aren't
	 * written at all, it's a shader bug and we don't care if this will be
	 * true.
	 */
	out->tessfactors_are_def_in_all_invocs = true;

	/* TODO: Implement scanning of tess factors, see tgsi backend. */
}

void si_nir_scan_shader(const struct nir_shader *nir,
			struct tgsi_shader_info *info)
{
	nir_function *func;
	unsigned i;

	assert(nir->info.stage == MESA_SHADER_VERTEX ||
	       nir->info.stage == MESA_SHADER_GEOMETRY ||
	       nir->info.stage == MESA_SHADER_TESS_CTRL ||
	       nir->info.stage == MESA_SHADER_TESS_EVAL ||
	       nir->info.stage == MESA_SHADER_FRAGMENT);

	info->processor = pipe_shader_type_from_mesa(nir->info.stage);
	info->num_tokens = 2; /* indicate that the shader is non-empty */
	info->num_instructions = 2;

	if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
		info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
			nir->info.tess.tcs_vertices_out;
	}

	if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		if (nir->info.tess.primitive_mode == GL_ISOLINES)
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
		else
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;

		STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_ODD);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_EVEN);
		info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
		info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
		info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
		info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
		info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
		info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
	}

	uint64_t processed_inputs = 0;
	unsigned num_inputs = 0;
	nir_foreach_variable(variable, &nir->inputs) {
		unsigned semantic_name, semantic_index;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type,
								   nir->info.stage == MESA_SHADER_VERTEX);

		/* Vertex shader inputs don't have semantics. The state
		 * tracker has already mapped them to attributes via
		 * variable->data.driver_location.
		 */
		if (nir->info.stage == MESA_SHADER_VERTEX) {
			if (glsl_type_is_dual_slot(variable->type))
				num_inputs += 2;
			else
				num_inputs++;
			continue;
		}

		/* Fragment shader position is a system value. */
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    variable->data.location == VARYING_SLOT_POS) {
			if (variable->data.pixel_center_integer)
				info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
					TGSI_FS_COORD_PIXEL_CENTER_INTEGER;

			num_inputs++;
			continue;
		}

		i = variable->data.driver_location;

		for (unsigned j = 0; j < attrib_count; j++, i++) {

			if (processed_inputs & ((uint64_t)1 << i))
				continue;

			processed_inputs |= ((uint64_t)1 << i);
			num_inputs++;

			tgsi_get_gl_varying_semantic(variable->data.location + j, true,
						     &semantic_name, &semantic_index);

			info->input_semantic_name[i] = semantic_name;
			info->input_semantic_index[i] = semantic_index;

			if (semantic_name == TGSI_SEMANTIC_PRIMID)
				info->uses_primid = true;

			if (variable->data.sample)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
			else if (variable->data.centroid)
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
			else
				info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;

			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(variable->type));

			switch (variable->data.interpolation) {
			case INTERP_MODE_NONE:
				if (glsl_base_type_is_integer(base_type)) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
					break;
				}

				if (semantic_name == TGSI_SEMANTIC_COLOR) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
					break;
				}
				/* fall-through */

			case INTERP_MODE_SMOOTH:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
				break;

			case INTERP_MODE_NOPERSPECTIVE:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
				break;

			case INTERP_MODE_FLAT:
				info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
				break;
			}

			/* TODO make this more precise */
			if (variable->data.location == VARYING_SLOT_COL0)
				info->colors_read |= 0x0f;
			else if (variable->data.location == VARYING_SLOT_COL1)
				info->colors_read |= 0xf0;
		}
	}

	info->num_inputs = num_inputs;

	uint64_t processed_outputs = 0;
	unsigned num_outputs = 0;
	nir_foreach_variable(variable, &nir->outputs) {
		unsigned semantic_name, semantic_index;

		if (nir->info.stage == MESA_SHADER_FRAGMENT) {
			tgsi_get_gl_frag_result_semantic(variable->data.location,
				&semantic_name, &semantic_index);

			/* Adjust for dual source blending */
			if (variable->data.index > 0) {
				semantic_index++;
			}
		} else {
			tgsi_get_gl_varying_semantic(variable->data.location, true,
						     &semantic_name, &semantic_index);
		}

		i = variable->data.driver_location;
		if (processed_outputs & ((uint64_t)1 << i))
			continue;

		processed_outputs |= ((uint64_t)1 << i);
		num_outputs++;

		info->output_semantic_name[i] = semantic_name;
		info->output_semantic_index[i] = semantic_index;
		info->output_usagemask[i] = TGSI_WRITEMASK_XYZW;

		unsigned num_components = 4;
		unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
		if (vector_elements)
			num_components = vector_elements;

		unsigned gs_out_streams;
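		/* The stream mask is packed two bits per component: bits 1:0
		 * hold the stream for x, 3:2 for y, 5:4 for z and 7:6 for w.
		 * Bit 31 of variable->data.stream marks that the state
		 * tracker already stored this packed form; otherwise
		 * data.stream is a single stream index replicated across the
		 * written components below.
		 */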
		if (variable->data.stream & (1u << 31)) {
			gs_out_streams = variable->data.stream & ~(1u << 31);
		} else {
			assert(variable->data.stream < 4);
			gs_out_streams = 0;
			for (unsigned j = 0; j < num_components; ++j)
				gs_out_streams |= variable->data.stream << (2 * (variable->data.location_frac + j));
		}

		unsigned streamx = gs_out_streams & 3;
		unsigned streamy = (gs_out_streams >> 2) & 3;
		unsigned streamz = (gs_out_streams >> 4) & 3;
		unsigned streamw = (gs_out_streams >> 6) & 3;

		if (info->output_usagemask[i] & TGSI_WRITEMASK_X) {
			info->output_streams[i] |= streamx;
			info->num_stream_output_components[streamx]++;
		}
		if (info->output_usagemask[i] & TGSI_WRITEMASK_Y) {
			info->output_streams[i] |= streamy << 2;
			info->num_stream_output_components[streamy]++;
		}
		if (info->output_usagemask[i] & TGSI_WRITEMASK_Z) {
			info->output_streams[i] |= streamz << 4;
			info->num_stream_output_components[streamz]++;
		}
		if (info->output_usagemask[i] & TGSI_WRITEMASK_W) {
			info->output_streams[i] |= streamw << 6;
			info->num_stream_output_components[streamw]++;
		}

		switch (semantic_name) {
		case TGSI_SEMANTIC_PRIMID:
			info->writes_primid = true;
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			info->writes_viewport_index = true;
			break;
		case TGSI_SEMANTIC_LAYER:
			info->writes_layer = true;
			break;
		case TGSI_SEMANTIC_PSIZE:
			info->writes_psize = true;
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			info->writes_clipvertex = true;
			break;
		case TGSI_SEMANTIC_COLOR:
			info->colors_written |= 1 << semantic_index;
			break;
		case TGSI_SEMANTIC_STENCIL:
			info->writes_stencil = true;
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			info->writes_samplemask = true;
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			info->writes_edgeflag = true;
			break;
		case TGSI_SEMANTIC_POSITION:
			if (info->processor == PIPE_SHADER_FRAGMENT)
				info->writes_z = true;
			else
				info->writes_position = true;
			break;
		}

		if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
			switch (semantic_name) {
			case TGSI_SEMANTIC_PATCH:
				info->reads_perpatch_outputs = true;
				break;
			case TGSI_SEMANTIC_TESSINNER:
			case TGSI_SEMANTIC_TESSOUTER:
				info->reads_tessfactor_outputs = true;
				break;
			default:
				info->reads_pervertex_outputs = true;
			}
		}
	}

	info->num_outputs = num_outputs;

	nir_foreach_variable(variable, &nir->uniforms) {
		const struct glsl_type *type = variable->type;
		enum glsl_base_type base_type =
			glsl_get_base_type(glsl_without_array(type));
		unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));

		/* We rely on the fact that nir_lower_samplers_as_deref has
		 * eliminated struct dereferences.
		 */
		if (base_type == GLSL_TYPE_SAMPLER)
			info->samplers_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);
		else if (base_type == GLSL_TYPE_IMAGE)
			info->images_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);
	}
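
	/* u_bit_consecutive(start, n) sets n consecutive bits; e.g. five
	 * written clip distances give clipdist_writemask = 0x1f below.
	 */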
	info->num_written_clipdistance = nir->info.clip_distance_array_size;
	info->num_written_culldistance = nir->info.cull_distance_array_size;
	info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
	info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

	if (info->processor == PIPE_SHADER_FRAGMENT)
		info->uses_kill = nir->info.fs.uses_discard;

	/* TODO make this more accurate */
	info->const_buffers_declared = u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
	info->shader_buffers_declared = u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);

	func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
	nir_foreach_block(block, func->impl) {
		nir_foreach_instr(instr, block)
			scan_instruction(info, instr);
	}
}

/**
 * Perform "lowering" operations on the NIR that are run once when the shader
 * selector is created.
 */
void
si_lower_nir(struct si_shader_selector *sel)
{
	/* Adjust the driver location of inputs and outputs. The state tracker
	 * interprets them as slots, while the ac/nir backend interprets them
	 * as individual components.
	 */
	nir_foreach_variable(variable, &sel->nir->inputs)
		variable->data.driver_location *= 4;
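
	/* For example, an input in slot 2 now starts at component 8, i.e. the
	 * x channel of the third vec4. The FRAG_RESULT_DEPTH/STENCIL offsets
	 * below likewise select the z and y components of the output slot,
	 * matching the TGSI convention for depth and stencil exports.
	 */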

	nir_foreach_variable(variable, &sel->nir->outputs) {
		variable->data.driver_location *= 4;

		if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
			if (variable->data.location == FRAG_RESULT_DEPTH)
				variable->data.driver_location += 2;
			else if (variable->data.location == FRAG_RESULT_STENCIL)
				variable->data.driver_location += 1;
		}
	}

	/* Perform lowerings (and optimizations) of code.
	 *
	 * Performance considerations aside, we must:
	 * - lower certain ALU operations
	 * - ensure constant offsets for texture instructions are folded
	 *   and copy-propagated
	 */
	NIR_PASS_V(sel->nir, nir_lower_io, nir_var_uniform, type_size,
		   (nir_lower_io_options)0);
	NIR_PASS_V(sel->nir, nir_lower_uniforms_to_ubo);

	NIR_PASS_V(sel->nir, nir_lower_returns);
	NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
	NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
	NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);

	static const struct nir_lower_tex_options lower_tex_options = {
		.lower_txp = ~0u,
	};
	NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);

	const nir_lower_subgroups_options subgroups_options = {
		.subgroup_size = 64,
		.ballot_bit_size = 32,
		.lower_to_scalar = true,
		.lower_subgroup_masks = true,
		.lower_vote_trivial = false,
	};
	NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);

	bool progress;
	do {
		progress = false;

		/* (Constant) copy propagation is needed for txf with offsets. */
		NIR_PASS(progress, sel->nir, nir_copy_prop);
		NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
		NIR_PASS(progress, sel->nir, nir_opt_dce);
		if (nir_opt_trivial_continues(sel->nir)) {
			progress = true;
			NIR_PASS(progress, sel->nir, nir_copy_prop);
			NIR_PASS(progress, sel->nir, nir_opt_dce);
		}
		NIR_PASS(progress, sel->nir, nir_opt_if);
		NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
		NIR_PASS(progress, sel->nir, nir_opt_cse);
		NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8);

		/* Needed for algebraic lowering */
		NIR_PASS(progress, sel->nir, nir_opt_algebraic);
		NIR_PASS(progress, sel->nir, nir_opt_constant_folding);

		NIR_PASS(progress, sel->nir, nir_opt_undef);
		NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
		if (sel->nir->options->max_unroll_iterations) {
			NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
		}
	} while (progress);
}

static void declare_nir_input_vs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}

static void declare_nir_input_fs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	unsigned slot = variable->data.location;
	if (slot == VARYING_SLOT_POS) {
		out[0] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT);
		out[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT);
		out[2] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT);
		/* gl_FragCoord.w holds 1/w: the hardware provides w, so invert it. */
		out[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
				       LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT));
		return;
	}

	si_llvm_load_input_fs(ctx, input_index, out);
}

LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
				  unsigned location,
				  unsigned driver_location,
				  unsigned component,
				  unsigned num_components,
				  unsigned vertex_index,
				  unsigned const_index,
				  LLVMTypeRef type)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef value[4];
	for (unsigned i = component; i < num_components + component; i++) {
		value[i] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4,
						 vertex_index, type, i);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

LLVMValueRef
si_nir_lookup_interp_param(struct ac_shader_abi *abi,
			   enum glsl_interp_mode interp, unsigned location)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	int interp_param_idx = -1;

	switch (interp) {
	case INTERP_MODE_FLAT:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_PERSP_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_PERSP_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_PERSP_SAMPLE;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_LINEAR_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_LINEAR_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_LINEAR_SAMPLE;
		break;
	default:
		assert(!"Unhandled interpolation mode.");
		return NULL;
	}

	return interp_param_idx != -1 ?
		LLVMGetParam(ctx->main_fn, interp_param_idx) : NULL;
}

static LLVMValueRef
si_nir_load_sampler_desc(struct ac_shader_abi *abi,
			 unsigned descriptor_set, unsigned base_index,
			 unsigned constant_index, LLVMValueRef dynamic_index,
			 enum ac_descriptor_type desc_type, bool image,
			 bool write)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	LLVMValueRef index = dynamic_index;

	assert(!descriptor_set);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildAdd(builder, index,
			     LLVMConstInt(ctx->ac.i32, base_index + constant_index, false),
			     "");
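
	/* Samplers and images share the single combined descriptor list bound
	 * above. The image branch below flips the index so that image slot i
	 * lives at list entry SI_NUM_IMAGES - 1 - i, while sampler slot i
	 * lives at entry SI_NUM_IMAGES / 2 + i (image descriptors presumably
	 * take half of a sampler-sized list entry, hence the division).
	 */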
	if (image) {
		assert(desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);
		assert(base_index + constant_index < ctx->num_images);

		if (dynamic_index)
			index = si_llvm_bound_index(ctx, index, ctx->num_images);

		index = LLVMBuildSub(ctx->gallivm.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");

		/* TODO: be smarter about when we use dcc_off */
		return si_load_image_desc(ctx, list, index, desc_type, write);
	}

	assert(base_index + constant_index < ctx->num_samplers);

	if (dynamic_index)
		index = si_llvm_bound_index(ctx, index, ctx->num_samplers);

	index = LLVMBuildAdd(ctx->gallivm.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");

	return si_load_sampler_desc(ctx, list, index, desc_type);
}
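
/* The ac/nir translator reads ctx->abi.inputs as typeless i32 values and
 * bitcasts them back to the type each load expects, so every channel is
 * stored as i32 here.
 */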
static void bitcast_inputs(struct si_shader_context *ctx,
			   LLVMValueRef data[4],
			   unsigned input_idx)
{
	for (unsigned chan = 0; chan < 4; chan++) {
		ctx->inputs[input_idx + chan] =
			LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
	}
}

bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		uint64_t processed_inputs = 0;
		nir_foreach_variable(variable, &nir->inputs) {
			unsigned attrib_count = glsl_count_attribute_slots(variable->type,
									   nir->info.stage == MESA_SHADER_VERTEX);
			unsigned input_idx = variable->data.driver_location;

			LLVMValueRef data[4];
			unsigned loc = variable->data.location;

			for (unsigned i = 0; i < attrib_count; i++) {
				/* Packed components share the same location so skip
				 * them if we have already processed the location.
				 */
				if (processed_inputs & ((uint64_t)1 << loc)) {
					input_idx += 4;
					continue;
				}

				if (nir->info.stage == MESA_SHADER_VERTEX) {
					declare_nir_input_vs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
					if (glsl_type_is_dual_slot(variable->type)) {
						input_idx += 4;
						declare_nir_input_vs(ctx, variable, input_idx / 4, data);
						bitcast_inputs(ctx, data, input_idx);
					}
				} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
					declare_nir_input_fs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
				}

				processed_inputs |= ((uint64_t)1 << loc);
				loc++;
				input_idx += 4;
			}
		}
	}

	ctx->abi.inputs = &ctx->inputs[0];
	ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
	ctx->abi.clamp_shadow_reference = true;

	ctx->num_samplers = util_last_bit(info->samplers_declared);
	ctx->num_images = util_last_bit(info->images_declared);

	ac_nir_translate(&ctx->ac, &ctx->abi, nir, NULL);

	return true;
}