/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"

#include "ac_nir_to_llvm.h"

#include "tgsi/tgsi_from_mesa.h"

#include "compiler/nir/nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"

static nir_variable* tex_get_texture_var(nir_tex_instr *instr)
{
	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src));
		default:
			break;
		}
	}

	return NULL;
}

static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
{
	return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
}

static void gather_intrinsic_load_deref_input_info(const nir_shader *nir,
						   const nir_intrinsic_instr *instr,
						   nir_variable *var,
						   struct tgsi_shader_info *info)
{
	assert(var && var->data.mode == nir_var_shader_in);

	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: {
		unsigned i = var->data.driver_location;
		unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
		uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);

		for (unsigned j = 0; j < attrib_count; j++, i++) {
			if (glsl_type_is_64bit(glsl_without_array(var->type))) {
				unsigned dmask = mask;

				if (glsl_type_is_dual_slot(glsl_without_array(var->type)) && j % 2)
					dmask >>= 2;

				dmask <<= var->data.location_frac / 2;

				if (dmask & 0x1)
					info->input_usage_mask[i] |= TGSI_WRITEMASK_XY;
				if (dmask & 0x2)
					info->input_usage_mask[i] |= TGSI_WRITEMASK_ZW;
			} else {
				info->input_usage_mask[i] |=
					(mask << var->data.location_frac) & 0xf;
			}
		}
		break;
	}
	default:
		break;
	}
}
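
/* Worked example for the 64-bit path above: a dvec4 input occupies two
 * vec4 slots.  Slot 0 (j even) holds doubles 0-1 as XY/ZW and slot 1
 * (j odd) holds doubles 2-3, so the per-double read mask is shifted
 * right by 2 for odd slots before bits 0 and 1 select TGSI_WRITEMASK_XY
 * and TGSI_WRITEMASK_ZW respectively.
 */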

static void gather_intrinsic_load_deref_output_info(const nir_shader *nir,
						    const nir_intrinsic_instr *instr,
						    nir_variable *var,
						    struct tgsi_shader_info *info)
{
	assert(var && var->data.mode == nir_var_shader_out);

	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		if (var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
		    var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
			info->reads_tessfactor_outputs = true;
		else if (var->data.patch)
			info->reads_perpatch_outputs = true;
		else
			info->reads_pervertex_outputs = true;
		break;

	case MESA_SHADER_FRAGMENT:
		if (var->data.fb_fetch_output)
			info->uses_fbfetch = true;
		break;
	default:
		break;
	}
}

static void gather_intrinsic_store_deref_output_info(const nir_shader *nir,
						     const nir_intrinsic_instr *instr,
						     nir_variable *var,
						     struct tgsi_shader_info *info)
{
	assert(var && var->data.mode == nir_var_shader_out);

	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: /* needed by LS, ES */
	case MESA_SHADER_TESS_EVAL: /* needed by ES */
	case MESA_SHADER_GEOMETRY: {
		unsigned i = var->data.driver_location;
		unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
		unsigned mask = nir_intrinsic_write_mask(instr);

		assert(!var->data.compact);

		for (unsigned j = 0; j < attrib_count; j++, i++) {
			if (glsl_type_is_64bit(glsl_without_array(var->type))) {
				unsigned dmask = mask;

				if (glsl_type_is_dual_slot(glsl_without_array(var->type)) && j % 2)
					dmask >>= 2;

				dmask <<= var->data.location_frac / 2;

				if (dmask & 0x1)
					info->output_usagemask[i] |= TGSI_WRITEMASK_XY;
				if (dmask & 0x2)
					info->output_usagemask[i] |= TGSI_WRITEMASK_ZW;
			} else {
				info->output_usagemask[i] |=
					(mask << var->data.location_frac) & 0xf;
			}
		}
		break;
	}
	default:
		break;
	}
}

static void scan_instruction(const struct nir_shader *nir,
			     struct tgsi_shader_info *info,
			     nir_instr *instr)
{
	if (instr->type == nir_instr_type_alu) {
		nir_alu_instr *alu = nir_instr_as_alu(instr);

		switch (alu->op) {
		case nir_op_fddx:
		case nir_op_fddy:
		case nir_op_fddx_fine:
		case nir_op_fddy_fine:
		case nir_op_fddx_coarse:
		case nir_op_fddy_coarse:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_tex) {
		nir_tex_instr *tex = nir_instr_as_tex(instr);
		nir_variable *texture = tex_get_texture_var(tex);

		if (!texture) {
			info->samplers_declared |=
				u_bit_consecutive(tex->sampler_index, 1);
		} else {
			if (texture->data.bindless)
				info->uses_bindless_samplers = true;
		}
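
		/* Ops that sample with an implicitly computed LOD need
		 * screen-space derivatives, just like the explicit
		 * fddx/fddy ALU ops above.
		 */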
		switch (tex->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			info->uses_derivatives = true;
			break;
		default:
			break;
		}
	} else if (instr->type == nir_instr_type_intrinsic) {
		nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

		switch (intr->intrinsic) {
		case nir_intrinsic_load_front_face:
			info->uses_frontface = 1;
			break;
		case nir_intrinsic_load_instance_id:
			info->uses_instanceid = 1;
			break;
		case nir_intrinsic_load_invocation_id:
			info->uses_invocationid = true;
			break;
		case nir_intrinsic_load_num_work_groups:
			info->uses_grid_size = true;
			break;
		case nir_intrinsic_load_local_group_size:
			/* The block size is translated to an IMM with a fixed block size. */
			if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
				info->uses_block_size = true;
			break;
		case nir_intrinsic_load_local_invocation_id:
		case nir_intrinsic_load_work_group_id: {
			unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
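			/* Iterate the set bits of the read mask; each bit is
			 * one of the x/y/z/w channels the shader actually uses.
			 */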
			while (mask) {
				unsigned i = u_bit_scan(&mask);

				if (intr->intrinsic == nir_intrinsic_load_work_group_id)
					info->uses_block_id[i] = true;
				else
					info->uses_thread_id[i] = true;
			}
			break;
		}
		case nir_intrinsic_load_vertex_id:
			info->uses_vertexid = 1;
			break;
		case nir_intrinsic_load_vertex_id_zero_base:
			info->uses_vertexid_nobase = 1;
			break;
		case nir_intrinsic_load_base_vertex:
			info->uses_basevertex = 1;
			break;
		case nir_intrinsic_load_draw_id:
			info->uses_drawid = 1;
			break;
		case nir_intrinsic_load_primitive_id:
			info->uses_primid = 1;
			break;
		case nir_intrinsic_load_sample_mask_in:
			info->reads_samplemask = true;
			break;
		case nir_intrinsic_load_tess_level_inner:
		case nir_intrinsic_load_tess_level_outer:
			info->reads_tess_factors = true;
			break;
		case nir_intrinsic_bindless_image_load:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_load = true;
			else
				info->uses_bindless_image_load = true;
			break;
		case nir_intrinsic_bindless_image_size:
		case nir_intrinsic_bindless_image_samples:
			info->uses_bindless_images = true;
			break;
		case nir_intrinsic_bindless_image_store:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_store = true;
			else
				info->uses_bindless_image_store = true;

			info->writes_memory = true;
			info->num_memory_instructions++; /* we only care about stores */
			break;
		case nir_intrinsic_image_deref_store:
			info->writes_memory = true;
			info->num_memory_instructions++; /* we only care about stores */
			break;
		case nir_intrinsic_bindless_image_atomic_add:
		case nir_intrinsic_bindless_image_atomic_imin:
		case nir_intrinsic_bindless_image_atomic_umin:
		case nir_intrinsic_bindless_image_atomic_imax:
		case nir_intrinsic_bindless_image_atomic_umax:
		case nir_intrinsic_bindless_image_atomic_and:
		case nir_intrinsic_bindless_image_atomic_or:
		case nir_intrinsic_bindless_image_atomic_xor:
		case nir_intrinsic_bindless_image_atomic_exchange:
		case nir_intrinsic_bindless_image_atomic_comp_swap:
			info->uses_bindless_images = true;

			if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
				info->uses_bindless_buffer_atomic = true;
			else
				info->uses_bindless_image_atomic = true;

			info->writes_memory = true;
			info->num_memory_instructions++; /* we only care about stores */
			break;
		case nir_intrinsic_image_deref_atomic_add:
		case nir_intrinsic_image_deref_atomic_imin:
		case nir_intrinsic_image_deref_atomic_umin:
		case nir_intrinsic_image_deref_atomic_imax:
		case nir_intrinsic_image_deref_atomic_umax:
		case nir_intrinsic_image_deref_atomic_and:
		case nir_intrinsic_image_deref_atomic_or:
		case nir_intrinsic_image_deref_atomic_xor:
		case nir_intrinsic_image_deref_atomic_exchange:
		case nir_intrinsic_image_deref_atomic_comp_swap:
		case nir_intrinsic_image_deref_atomic_inc_wrap:
		case nir_intrinsic_image_deref_atomic_dec_wrap:
			info->writes_memory = true;
			info->num_memory_instructions++; /* we only care about stores */
			break;
		case nir_intrinsic_store_ssbo:
		case nir_intrinsic_ssbo_atomic_add:
		case nir_intrinsic_ssbo_atomic_imin:
		case nir_intrinsic_ssbo_atomic_umin:
		case nir_intrinsic_ssbo_atomic_imax:
		case nir_intrinsic_ssbo_atomic_umax:
		case nir_intrinsic_ssbo_atomic_and:
		case nir_intrinsic_ssbo_atomic_or:
		case nir_intrinsic_ssbo_atomic_xor:
		case nir_intrinsic_ssbo_atomic_exchange:
		case nir_intrinsic_ssbo_atomic_comp_swap:
			info->writes_memory = true;
			info->num_memory_instructions++; /* we only care about stores */
			break;
		case nir_intrinsic_load_color0:
		case nir_intrinsic_load_color1: {
			unsigned index = intr->intrinsic == nir_intrinsic_load_color1;
			uint8_t mask = nir_ssa_def_components_read(&intr->dest.ssa);
			info->colors_read |= mask << (index * 4);
			break;
		}
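		/* The barycentric intrinsics encode both the interpolation
		 * mode (perspective vs. linear) and the location (center,
		 * centroid, sample); the cases below demultiplex that into
		 * the individual uses_* flags.
		 */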
		case nir_intrinsic_load_barycentric_pixel:
		case nir_intrinsic_load_barycentric_centroid:
		case nir_intrinsic_load_barycentric_sample:
		case nir_intrinsic_load_barycentric_at_offset: /* uses center */
		case nir_intrinsic_load_barycentric_at_sample: { /* uses center */
			unsigned mode = nir_intrinsic_interp_mode(intr);

			if (mode == INTERP_MODE_FLAT)
				break;

			if (mode == INTERP_MODE_NOPERSPECTIVE) {
				if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
					info->uses_linear_sample = true;
				else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
					info->uses_linear_centroid = true;
				else
					info->uses_linear_center = true;

				if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
					info->uses_linear_opcode_interp_sample = true;
			} else {
				if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
					info->uses_persp_sample = true;
				else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
					info->uses_persp_centroid = true;
				else
					info->uses_persp_center = true;

				if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
					info->uses_persp_opcode_interp_sample = true;
			}
			break;
		}
		case nir_intrinsic_load_deref: {
			nir_variable *var = intrinsic_get_var(intr);
			nir_variable_mode mode = var->data.mode;

			if (mode == nir_var_shader_in) {
				/* PS inputs use the interpolated load intrinsics. */
				assert(nir->info.stage != MESA_SHADER_FRAGMENT);
				gather_intrinsic_load_deref_input_info(nir, intr, var, info);
			} else if (mode == nir_var_shader_out) {
				gather_intrinsic_load_deref_output_info(nir, intr, var, info);
			}
			break;
		}
		case nir_intrinsic_store_deref: {
			nir_variable *var = intrinsic_get_var(intr);

			if (var->data.mode == nir_var_shader_out)
				gather_intrinsic_store_deref_output_info(nir, intr, var, info);
			break;
		}
		case nir_intrinsic_interp_deref_at_centroid:
		case nir_intrinsic_interp_deref_at_sample:
		case nir_intrinsic_interp_deref_at_offset:
			unreachable("interp opcodes should have been lowered");
			break;
		default:
			break;
		}
	}
}

void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
			   struct tgsi_tessctrl_info *out)
{
	memset(out, 0, sizeof(*out));

	if (nir->info.stage != MESA_SHADER_TESS_CTRL)
		return;

	out->tessfactors_are_def_in_all_invocs =
		ac_are_tessfactors_def_in_all_invocs(nir);
}

void si_nir_scan_shader(const struct nir_shader *nir,
			struct tgsi_shader_info *info)
{
	nir_function *func;
	unsigned i;

	info->processor = pipe_shader_type_from_mesa(nir->info.stage);
	info->num_tokens = 2; /* indicate that the shader is non-empty */
	info->num_instructions = 2;

	info->properties[TGSI_PROPERTY_NEXT_SHADER] =
		pipe_shader_type_from_mesa(nir->info.next_stage);

	if (nir->info.stage == MESA_SHADER_VERTEX) {
		info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
			nir->info.vs.window_space_position;
		info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] =
			nir->info.vs.blit_sgprs_amd;
	}

	if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
		info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
			nir->info.tess.tcs_vertices_out;
	}

	if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		if (nir->info.tess.primitive_mode == GL_ISOLINES)
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
		else
			info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
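
		/* gl_tess_spacing and pipe_tess_spacing list the same three
		 * spacing modes in a rotated order, so (x + 1) % 3 remaps
		 * one enum to the other; the STATIC_ASSERTs below pin the
		 * correspondence down at build time.
		 */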
		STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_ODD);
		STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
			      PIPE_TESS_SPACING_FRACTIONAL_EVEN);

		info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
		info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
		info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
		info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
		info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
		info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
	}

	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
			nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
		info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;

		if (nir->info.fs.pixel_center_integer) {
			info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
				TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
		}

		if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
			switch (nir->info.fs.depth_layout) {
			case FRAG_DEPTH_LAYOUT_ANY:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
				break;
			case FRAG_DEPTH_LAYOUT_GREATER:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
				break;
			case FRAG_DEPTH_LAYOUT_LESS:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
				break;
			case FRAG_DEPTH_LAYOUT_UNCHANGED:
				info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
				break;
			default:
				unreachable("Unknown depth layout");
			}
		}
	}

	if (gl_shader_stage_is_compute(nir->info.stage)) {
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
		info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
		info->properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD] = nir->info.cs.user_data_components_amd;
	}

	uint64_t processed_inputs = 0;
	nir_foreach_variable(variable, &nir->inputs) {
		unsigned semantic_name, semantic_index;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type,
								   nir->info.stage == MESA_SHADER_VERTEX);

		i = variable->data.driver_location;

		/* Vertex shader inputs don't have semantics. The state
		 * tracker has already mapped them to attributes via
		 * variable->data.driver_location.
		 */
		if (nir->info.stage == MESA_SHADER_VERTEX) {
			processed_inputs |= 1ull << i;

			if (glsl_type_is_dual_slot(glsl_without_array(variable->type)))
				processed_inputs |= 2ull << i;

			continue;
		}
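
		/* Note that a dual-slot (64-bit) attribute occupies driver
		 * locations i and i + 1, which is why both 1ull << i and
		 * 2ull << i are recorded above.
		 */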

		for (unsigned j = 0; j < attrib_count; j++, i++) {

			if (processed_inputs & ((uint64_t)1 << i))
				continue;

			processed_inputs |= ((uint64_t)1 << i);

			tgsi_get_gl_varying_semantic(variable->data.location + j, true,
						     &semantic_name, &semantic_index);

			info->input_semantic_name[i] = semantic_name;
			info->input_semantic_index[i] = semantic_index;

			if (semantic_name == TGSI_SEMANTIC_PRIMID)
				info->uses_primid = true;

			if (semantic_name == TGSI_SEMANTIC_COLOR) {
				/* We only need this for color inputs. */
				if (variable->data.sample)
					info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
				else if (variable->data.centroid)
					info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
				else
					info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;
			}

			enum glsl_base_type base_type =
				glsl_get_base_type(glsl_without_array(variable->type));

			switch (variable->data.interpolation) {
			case INTERP_MODE_NONE:
				if (glsl_base_type_is_integer(base_type)) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
					break;
				}

				if (semantic_name == TGSI_SEMANTIC_COLOR) {
					info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
					break;
				}
				/* fall-through */

			case INTERP_MODE_SMOOTH:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
				break;

			case INTERP_MODE_NOPERSPECTIVE:
				assert(!glsl_base_type_is_integer(base_type));

				info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
				break;

			case INTERP_MODE_FLAT:
				info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
				break;
			}
		}
	}

	uint64_t processed_outputs = 0;
	nir_foreach_variable(variable, &nir->outputs) {
		unsigned semantic_name, semantic_index;

		i = variable->data.driver_location;

		const struct glsl_type *type = variable->type;
		if (nir_is_per_vertex_io(variable, nir->info.stage)) {
			assert(glsl_type_is_array(type));
			type = glsl_get_array_element(type);
		}

		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		for (unsigned k = 0; k < attrib_count; k++, i++) {

			if (nir->info.stage == MESA_SHADER_FRAGMENT) {
				tgsi_get_gl_frag_result_semantic(variable->data.location + k,
								 &semantic_name, &semantic_index);

				/* Adjust for dual source blending */
				if (variable->data.index > 0) {
					semantic_index++;
				}
			} else {
				tgsi_get_gl_varying_semantic(variable->data.location + k, true,
							     &semantic_name, &semantic_index);
			}

			unsigned num_components = 4;
			unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
			if (vector_elements)
				num_components = vector_elements;

			unsigned component = variable->data.location_frac;
			if (glsl_type_is_64bit(glsl_without_array(variable->type))) {
				if (glsl_type_is_dual_slot(glsl_without_array(variable->type)) && k % 2) {
					num_components = (num_components * 2) - 4;
					component = 0;
				} else {
					num_components = MIN2(num_components * 2, 4);
				}
			}

			ubyte usagemask = 0;
			for (unsigned j = component; j < num_components + component; j++) {
				switch (j) {
				case 0:
					usagemask |= TGSI_WRITEMASK_X;
					break;
				case 1:
					usagemask |= TGSI_WRITEMASK_Y;
					break;
				case 2:
					usagemask |= TGSI_WRITEMASK_Z;
					break;
				case 3:
					usagemask |= TGSI_WRITEMASK_W;
					break;
				default:
					unreachable("error calculating component index");
				}
			}

			unsigned gs_out_streams;
			if (variable->data.stream & (1u << 31)) {
				gs_out_streams = variable->data.stream & ~(1u << 31);
			} else {
				assert(variable->data.stream < 4);

				gs_out_streams = 0;
				for (unsigned j = 0; j < num_components; ++j)
					gs_out_streams |= variable->data.stream << (2 * (component + j));
			}
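
			/* gs_out_streams now holds two bits of stream index
			 * per written component, e.g. a vec2 on stream 1 at
			 * component 0 packs to 0b0101; the per-channel stream
			 * indices are unpacked below.
			 */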
			unsigned streamx = gs_out_streams & 3;
			unsigned streamy = (gs_out_streams >> 2) & 3;
			unsigned streamz = (gs_out_streams >> 4) & 3;
			unsigned streamw = (gs_out_streams >> 6) & 3;

			if (usagemask & TGSI_WRITEMASK_X) {
				info->output_streams[i] |= streamx;
				info->num_stream_output_components[streamx]++;
			}
			if (usagemask & TGSI_WRITEMASK_Y) {
				info->output_streams[i] |= streamy << 2;
				info->num_stream_output_components[streamy]++;
			}
			if (usagemask & TGSI_WRITEMASK_Z) {
				info->output_streams[i] |= streamz << 4;
				info->num_stream_output_components[streamz]++;
			}
			if (usagemask & TGSI_WRITEMASK_W) {
				info->output_streams[i] |= streamw << 6;
				info->num_stream_output_components[streamw]++;
			}

			/* make sure we only count this location once against
			 * the num_outputs counter.
			 */
			if (processed_outputs & ((uint64_t)1 << i))
				continue;

			processed_outputs |= ((uint64_t)1 << i);

			info->output_semantic_name[i] = semantic_name;
			info->output_semantic_index[i] = semantic_index;

			switch (semantic_name) {
			case TGSI_SEMANTIC_PRIMID:
				info->writes_primid = true;
				break;
			case TGSI_SEMANTIC_VIEWPORT_INDEX:
				info->writes_viewport_index = true;
				break;
			case TGSI_SEMANTIC_LAYER:
				info->writes_layer = true;
				break;
			case TGSI_SEMANTIC_PSIZE:
				info->writes_psize = true;
				break;
			case TGSI_SEMANTIC_CLIPVERTEX:
				info->writes_clipvertex = true;
				break;
			case TGSI_SEMANTIC_COLOR:
				info->colors_written |= 1 << semantic_index;
				break;
			case TGSI_SEMANTIC_STENCIL:
				info->writes_stencil = true;
				break;
			case TGSI_SEMANTIC_SAMPLEMASK:
				info->writes_samplemask = true;
				break;
			case TGSI_SEMANTIC_EDGEFLAG:
				info->writes_edgeflag = true;
				break;
			case TGSI_SEMANTIC_POSITION:
				if (info->processor == PIPE_SHADER_FRAGMENT)
					info->writes_z = true;
				else
					info->writes_position = true;
				break;
			}
		}

		unsigned loc = variable->data.location;
		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
		    loc == FRAG_RESULT_COLOR &&
		    nir->info.outputs_written & (1ull << loc)) {
			assert(attrib_count == 1);
			info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
		}
	}

	info->num_inputs = util_last_bit64(processed_inputs);
	info->num_outputs = util_last_bit64(processed_outputs);

	/* Inputs and outputs can't have holes. If this fails, use
	 * nir_assign_io_var_locations to re-assign driver_location.
	 */
	assert(processed_inputs == u_bit_consecutive64(0, info->num_inputs));
	assert(processed_outputs == u_bit_consecutive64(0, info->num_outputs));

	struct set *ubo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
					       _mesa_key_pointer_equal);
	struct set *ssbo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
						_mesa_key_pointer_equal);

	/* Initialise const_file_max[0]. */
	info->const_file_max[0] = -1;

	/* The first 8 are reserved for atomic counters using SSBOs. */
	unsigned ssbo_idx = 8;

	unsigned ubo_idx = 1;
	nir_foreach_variable(variable, &nir->uniforms) {
		const struct glsl_type *type = variable->type;
		enum glsl_base_type base_type =
			glsl_get_base_type(glsl_without_array(type));
		unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));
		unsigned loc = variable->data.driver_location / 4;
		int slot_count = glsl_count_attribute_slots(type, false);
		int max_slot = MAX2(info->const_file_max[0], (int) loc) + slot_count;

		/* Gather the declared-buffer bitmasks. Note: radeonsi doesn't
		 * really use the mask (other than ubo_idx == 1 for regular
		 * uniforms); it's really only used for getting the buffer
		 * count, so we don't need to worry about the ordering.
		 */
		if (variable->interface_type != NULL) {
			if (variable->data.mode == nir_var_uniform ||
			    variable->data.mode == nir_var_mem_ubo ||
			    variable->data.mode == nir_var_mem_ssbo) {

				struct set *buf_set = variable->data.mode == nir_var_mem_ssbo ?
					ssbo_set : ubo_set;

				unsigned block_count;
				if (base_type != GLSL_TYPE_INTERFACE) {
					struct set_entry *entry =
						_mesa_set_search(buf_set, variable->interface_type);

					/* Check if we have already processed
					 * a member from this ubo.
					 */
					if (entry)
						continue;

					block_count = 1;
				} else {
					block_count = aoa_size;
				}

				if (variable->data.mode == nir_var_uniform ||
				    variable->data.mode == nir_var_mem_ubo) {
					info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
					ubo_idx += block_count;
				} else {
					assert(variable->data.mode == nir_var_mem_ssbo);

					info->shader_buffers_declared |= u_bit_consecutive(ssbo_idx, block_count);
					ssbo_idx += block_count;
				}

				_mesa_set_add(buf_set, variable->interface_type);
			}

			continue;
		}

		/* We rely on the fact that nir_lower_samplers_as_deref has
		 * eliminated struct dereferences.
		 */
		if (base_type == GLSL_TYPE_SAMPLER && !variable->data.bindless) {
			info->samplers_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);
		} else if (base_type == GLSL_TYPE_IMAGE && !variable->data.bindless) {
			info->images_declared |=
				u_bit_consecutive(variable->data.binding, aoa_size);
		} else if (base_type != GLSL_TYPE_ATOMIC_UINT) {
			info->const_buffers_declared |= 1;
			info->const_file_max[0] = max_slot;
		}
	}

	_mesa_set_destroy(ubo_set, NULL);
	_mesa_set_destroy(ssbo_set, NULL);

	info->num_written_clipdistance = nir->info.clip_distance_array_size;
	info->num_written_culldistance = nir->info.cull_distance_array_size;
	info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
	info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);

	if (info->processor == PIPE_SHADER_FRAGMENT)
		info->uses_kill = nir->info.fs.uses_discard;

	func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
	nir_foreach_block(block, func->impl) {
		nir_foreach_instr(instr, block)
			scan_instruction(nir, info, instr);
	}
}

void
si_nir_opts(struct nir_shader *nir)
{
	bool progress;
	unsigned lower_flrp =
		(nir->options->lower_flrp16 ? 16 : 0) |
		(nir->options->lower_flrp32 ? 32 : 0) |
		(nir->options->lower_flrp64 ? 64 : 0);
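
	/* Run the cleanup passes below to a fixed point: each NIR_PASS sets
	 * `progress` when it changed anything, and the do/while repeats the
	 * whole schedule until a full iteration makes no further changes.
	 */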
	do {
		progress = false;

		NIR_PASS_V(nir, nir_lower_vars_to_ssa);

		NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
		NIR_PASS(progress, nir, nir_opt_dead_write_vars);

		NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL);
		NIR_PASS_V(nir, nir_lower_phis_to_scalar);

		/* (Constant) copy propagation is needed for txf with offsets. */
		NIR_PASS(progress, nir, nir_copy_prop);
		NIR_PASS(progress, nir, nir_opt_remove_phis);
		NIR_PASS(progress, nir, nir_opt_dce);
		if (nir_opt_trivial_continues(nir)) {
			progress = true;
			NIR_PASS(progress, nir, nir_copy_prop);
			NIR_PASS(progress, nir, nir_opt_dce);
		}
		NIR_PASS(progress, nir, nir_opt_if, true);
		NIR_PASS(progress, nir, nir_opt_dead_cf);
		NIR_PASS(progress, nir, nir_opt_cse);
		NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

		/* Needed for algebraic lowering */
		NIR_PASS(progress, nir, nir_opt_algebraic);
		NIR_PASS(progress, nir, nir_opt_constant_folding);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;

			NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 nir->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, nir,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, nir, nir_opt_undef);
		NIR_PASS(progress, nir, nir_opt_conditional_discard);
		if (nir->options->max_unroll_iterations) {
			NIR_PASS(progress, nir, nir_opt_loop_unroll, 0);
		}
	} while (progress);
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}

static void
si_nir_lower_color(nir_shader *nir)
{
	nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);

	nir_builder b;
	nir_builder_init(&b, entrypoint);

	nir_foreach_block(block, entrypoint) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *intrin =
				nir_instr_as_intrinsic(instr);

			if (intrin->intrinsic != nir_intrinsic_load_deref)
				continue;

			nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
			if (deref->mode != nir_var_shader_in)
				continue;

			b.cursor = nir_before_instr(instr);
			nir_variable *var = nir_deref_instr_get_variable(deref);
			nir_ssa_def *def;

			if (var->data.location == VARYING_SLOT_COL0) {
				def = nir_load_color0(&b);
			} else if (var->data.location == VARYING_SLOT_COL1) {
				def = nir_load_color1(&b);
			} else {
				continue;
			}

			nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(def));
			nir_instr_remove(instr);
		}
	}
}

void si_nir_lower_ps_inputs(struct nir_shader *nir)
{
	if (nir->info.stage != MESA_SHADER_FRAGMENT)
		return;

	NIR_PASS_V(nir, nir_lower_io_to_temporaries,
		   nir_shader_get_entrypoint(nir), false, true);

	/* Since we're doing nir_lower_io_to_temporaries late, we need
	 * to lower all the copy_deref's introduced by
	 * lower_io_to_temporaries before calling nir_lower_io.
	 */
	NIR_PASS_V(nir, nir_split_var_copies);
	NIR_PASS_V(nir, nir_lower_var_copies);
	NIR_PASS_V(nir, nir_lower_global_vars_to_local);

	si_nir_lower_color(nir);
	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants. */
	NIR_PASS_V(nir, nir_opt_constant_folding);
	NIR_PASS_V(nir, nir_io_add_const_offset_to_base,
		   nir_var_shader_in);
}

/**
 * Perform "lowering" operations on the NIR that are run once when the shader
 * selector is created.
 */
void si_lower_nir(struct si_shader_selector *sel)
{
	/* Adjust the driver location of inputs and outputs. The state tracker
	 * interprets them as slots, while the ac/nir backend interprets them
	 * as individual components.
	 */
	if (sel->nir->info.stage != MESA_SHADER_FRAGMENT) {
		nir_foreach_variable(variable, &sel->nir->inputs)
			variable->data.driver_location *= 4;
	}

	nir_foreach_variable(variable, &sel->nir->outputs) {
		variable->data.driver_location *= 4;

		if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
			if (variable->data.location == FRAG_RESULT_DEPTH)
				variable->data.driver_location += 2;
			else if (variable->data.location == FRAG_RESULT_STENCIL)
				variable->data.driver_location += 1;
		}
	}

	/* Perform lowerings (and optimizations) of code.
	 *
	 * Performance considerations aside, we must:
	 * - lower certain ALU operations
	 * - ensure constant offsets for texture instructions are folded
	 *   and copy-propagated
	 */

	static const struct nir_lower_tex_options lower_tex_options = {
		.lower_txp = ~0u,
	};
	NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);
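
	/* Subgroup operations are lowered assuming a 64-lane wave, the
	 * native wavefront width on GCN.
	 */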
	const nir_lower_subgroups_options subgroups_options = {
		.subgroup_size = 64,
		.ballot_bit_size = 64,
		.lower_to_scalar = true,
		.lower_subgroup_masks = true,
		.lower_vote_trivial = false,
		.lower_vote_eq_to_ballot = true,
	};
	NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);

	ac_lower_indirect_derefs(sel->nir, sel->screen->info.chip_class);

	si_nir_opts(sel->nir);

	NIR_PASS_V(sel->nir, nir_lower_bool_to_int32);

	/* Strip the resulting shader so that the shader cache is more likely
	 * to hit from other similar shaders.
	 */
	nir_strip(sel->nir);
}

static void declare_nir_input_vs(struct si_shader_context *ctx,
				 struct nir_variable *variable,
				 unsigned input_index,
				 LLVMValueRef out[4])
{
	si_llvm_load_input_vs(ctx, input_index, out);
}

static LLVMValueRef
si_nir_lookup_interp_param(struct ac_shader_abi *abi,
			   enum glsl_interp_mode interp, unsigned location)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	int interp_param_idx = -1;

	switch (interp) {
	case INTERP_MODE_FLAT:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_PERSP_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_PERSP_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_PERSP_SAMPLE;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			interp_param_idx = SI_PARAM_LINEAR_CENTER;
		else if (location == INTERP_CENTROID)
			interp_param_idx = SI_PARAM_LINEAR_CENTROID;
		else if (location == INTERP_SAMPLE)
			interp_param_idx = SI_PARAM_LINEAR_SAMPLE;
		break;
	default:
		assert(!"Unhandled interpolation mode.");
		return NULL;
	}

	return interp_param_idx != -1 ?
		LLVMGetParam(ctx->main_fn, interp_param_idx) : NULL;
}

static LLVMValueRef
si_nir_load_sampler_desc(struct ac_shader_abi *abi,
			 unsigned descriptor_set, unsigned base_index,
			 unsigned constant_index, LLVMValueRef dynamic_index,
			 enum ac_descriptor_type desc_type, bool image,
			 bool write, bool bindless)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned const_index = base_index + constant_index;

	assert(!descriptor_set);
	assert(!image || desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);

	if (bindless) {
		LLVMValueRef list =
			LLVMGetParam(ctx->main_fn, ctx->param_bindless_samplers_and_images);

		/* dynamic_index is the bindless handle */
		if (image) {
			/* For simplicity, bindless image descriptors use fixed
			 * 16-dword slots for now.
			 */
			dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
						     LLVMConstInt(ctx->i64, 2, 0), "");

			return si_load_image_desc(ctx, list, dynamic_index, desc_type,
						  write, true);
		}

		/* Since bindless handle arithmetic can contain an unsigned integer
		 * wraparound and si_load_sampler_desc assumes there isn't any,
		 * use GEP without "inbounds" (inside ac_build_pointer_add)
		 * to prevent incorrect code generation and hangs.
		 */
		dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
					     LLVMConstInt(ctx->i64, 2, 0), "");
		list = ac_build_pointer_add(&ctx->ac, list, dynamic_index);
		return si_load_sampler_desc(ctx, list, ctx->i32_0, desc_type);
	}

	unsigned num_slots = image ? ctx->num_images : ctx->num_samplers;
	assert(const_index < num_slots || dynamic_index);

	LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, const_index, false);

	if (dynamic_index) {
		index = LLVMBuildAdd(builder, index, dynamic_index, "");

		/* From the GL_ARB_shader_image_load_store extension spec:
		 *
		 *    If a shader performs an image load, store, or atomic
		 *    operation using an image variable declared as an array,
		 *    and if the index used to select an individual element is
		 *    negative or greater than or equal to the size of the
		 *    array, the results of the operation are undefined but may
		 *    not lead to termination.
		 */
		index = si_llvm_bound_index(ctx, index, num_slots);
	}
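
	/* Images occupy the first SI_NUM_IMAGES slots of the shared
	 * samplers-and-images list in reverse order, which is why the index
	 * is mirrored around SI_NUM_IMAGES - 1 below; samplers follow at an
	 * offset of SI_NUM_IMAGES / 2, each sampler descriptor taking two
	 * image-sized slots.
	 */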
	if (image) {
		index = LLVMBuildSub(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
				     index, "");
		return si_load_image_desc(ctx, list, index, desc_type, write, false);
	}

	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
	return si_load_sampler_desc(ctx, list, index, desc_type);
}

static void bitcast_inputs(struct si_shader_context *ctx,
			   LLVMValueRef data[4],
			   unsigned input_idx)
{
	for (unsigned chan = 0; chan < 4; chan++) {
		ctx->inputs[input_idx + chan] =
			LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
	}
}

bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;

	if (nir->info.stage == MESA_SHADER_VERTEX) {
		uint64_t processed_inputs = 0;
		nir_foreach_variable(variable, &nir->inputs) {
			unsigned attrib_count = glsl_count_attribute_slots(variable->type,
									   true);
			unsigned input_idx = variable->data.driver_location;

			LLVMValueRef data[4];
			unsigned loc = variable->data.location;

			for (unsigned i = 0; i < attrib_count; i++) {
				/* Packed components share the same location so skip
				 * them if we have already processed the location.
				 */
				if (processed_inputs & ((uint64_t)1 << (loc + i))) {
					input_idx += 4;
					continue;
				}

				declare_nir_input_vs(ctx, variable, input_idx / 4, data);
				bitcast_inputs(ctx, data, input_idx);
				if (glsl_type_is_dual_slot(variable->type)) {
					input_idx += 4;
					declare_nir_input_vs(ctx, variable, input_idx / 4, data);
					bitcast_inputs(ctx, data, input_idx);
				}

				processed_inputs |= ((uint64_t)1 << (loc + i));
				input_idx += 4;
			}
		}
	} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		unsigned colors_read =
			ctx->shader->selector->info.colors_read;
		LLVMValueRef main_fn = ctx->main_fn;

		LLVMValueRef undef = LLVMGetUndef(ctx->f32);

		unsigned offset = SI_PARAM_POS_FIXED_PT + 1;
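
		/* Color inputs lowered to load_color0/1 arrive as extra FS
		 * params right after the fixed-point position; consume them
		 * in x,y,z,w order, substituting undef for channels the
		 * shader never reads.
		 */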
		if (colors_read & 0x0f) {
			unsigned mask = colors_read & 0x0f;
			LLVMValueRef values[4];
			values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
			values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
			values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
			values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
			ctx->abi.color0 =
				ac_to_integer(&ctx->ac,
					      ac_build_gather_values(&ctx->ac, values, 4));
		}
		if (colors_read & 0xf0) {
			unsigned mask = (colors_read & 0xf0) >> 4;
			LLVMValueRef values[4];
			values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
			values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
			values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
			values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
			ctx->abi.color1 =
				ac_to_integer(&ctx->ac,
					      ac_build_gather_values(&ctx->ac, values, 4));
		}

		ctx->abi.interp_at_sample_force_center =
			ctx->shader->key.mono.u.ps.interpolate_at_sample_force_center;
	} else if (nir->info.stage == MESA_SHADER_COMPUTE) {
		if (nir->info.cs.user_data_components_amd) {
			ctx->abi.user_data = LLVMGetParam(ctx->main_fn, ctx->param_cs_user_data);
			ctx->abi.user_data = ac_build_expand_to_vec4(&ctx->ac, ctx->abi.user_data,
								     nir->info.cs.user_data_components_amd);
		}
	}

	ctx->abi.inputs = &ctx->inputs[0];
	ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
	ctx->abi.clamp_shadow_reference = true;
	ctx->abi.robust_buffer_access = true;

	ctx->num_samplers = util_last_bit(info->samplers_declared);
	ctx->num_images = util_last_bit(info->images_declared);

	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE]) {
		assert(gl_shader_stage_is_compute(nir->info.stage));
		si_declare_compute_memory(ctx);
	}
	ac_nir_translate(&ctx->ac, &ctx->abi, nir);

	return true;
}