/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "../vulkan/radv_descriptor_set.h"
#include "util/bitscan.h"
#include <llvm-c/Transforms/Scalar.h>
#include "ac_shader_abi.h"
#include "ac_shader_info.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct ac_nir_context {
	struct ac_llvm_context ac;
	struct ac_shader_abi *abi;

	gl_shader_stage stage;

	struct hash_table *defs;
	struct hash_table *phis;
	struct hash_table *vars;

	LLVMValueRef main_function;
	LLVMBasicBlockRef continue_block;
	LLVMBasicBlockRef break_block;
};
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct ac_nir_compiler_options *options;
	struct ac_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[AC_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef ls_out_layout;
	LLVMValueRef es2gs_offset;

	LLVMValueRef tcs_offchip_layout;
	LLVMValueRef tcs_out_offsets;
	LLVMValueRef tcs_out_layout;
	LLVMValueRef tcs_in_layout;
	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gsvs_ring_stride;
	LLVMValueRef gsvs_num_entries;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring;
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef sample_pos_offset;
	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t output_mask;
	uint8_t num_output_clips;
	uint8_t num_output_culls;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex;
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;
	uint64_t tess_outputs_written;
	uint64_t tess_patch_outputs_written;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     const nir_deref_var *deref,
				     enum ac_descriptor_type desc_type,
				     const nir_tex_instr *instr,
				     bool image, bool write);

static unsigned radeon_llvm_reg_index_soa(unsigned index, unsigned chan)
{
	return (index * 4) + chan;
}
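
/* Map a gl_varying_slot onto a compact per-stage I/O index. Patch varyings
 * (tess levels and per-patch slots) get their own numbering; everything else
 * is packed after the position/psize/clip-dist slots so that the producing
 * and consuming stages agree on the same index.
 */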
static unsigned shader_io_get_unique_index(gl_varying_slot slot)
{
	/* handle patch indices separate */
	if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
		return 0;
	if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
		return 1;
	if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
		return 2 + (slot - VARYING_SLOT_PATCH0);

	if (slot == VARYING_SLOT_POS)
		return 0;
	if (slot == VARYING_SLOT_PSIZ)
		return 1;
	if (slot == VARYING_SLOT_CLIP_DIST0)
		return 2;
	/* 3 is reserved for clip dist as well */
	if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
		return 4 + (slot - VARYING_SLOT_VAR0);
	unreachable("illegal slot in get unique index\n");
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = HAVE_LLVM >= 0x0500 ? RADEON_LLVM_AMDGPU_HS : RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}

static void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     bool unsafe_math)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}
	if (unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
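
/* Several shader arguments pack multiple fields into one 32-bit SGPR
 * (e.g. the TCS/LS layout words). unpack_param() extracts one such field:
 * shift right by 'rshift' and mask the result down to 'bitwidth' bits.
 */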
static LLVMValueRef
unpack_param(struct ac_llvm_context *ctx,
	     LLVMValueRef param, unsigned rshift,
	     unsigned bitwidth)
{
	LLVMValueRef value = param;
	if (rshift)
		value = LLVMBuildLShr(ctx->builder, value,
				      LLVMConstInt(ctx->i32, rshift, false), "");

	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->builder, value,
				     LLVMConstInt(ctx->i32, mask, false), "");
	}
	return value;
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2                    = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - TCS outputs for patch 0                   = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0         = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2                   = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2         = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	if (ctx->stage == MESA_SHADER_VERTEX)
		return unpack_param(&ctx->ac, ctx->ls_out_layout, 0, 13);
	else if (ctx->stage == MESA_SHADER_TESS_CTRL)
		return unpack_param(&ctx->ac, ctx->tcs_in_layout, 0, 13);
	else
		unreachable("Illegal stage");
}

static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	return unpack_param(&ctx->ac, ctx->tcs_out_layout, 0, 13);
}

static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	return unpack_param(&ctx->ac, ctx->tcs_out_layout, 13, 8);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
			    LLVMConstInt(ctx->ac.i32, 4, false), "");
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
			    LLVMConstInt(ctx->ac.i32, 4, false), "");
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""), "");
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""), "");
}
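
/* User-SGPR bookkeeping: set_loc()/set_loc_shader()/set_loc_desc() record in
 * ac_userdata_info where a piece of user data (push constants, descriptor
 * sets, ...) was placed, so the driver knows which SGPRs to program.
 */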
static void
set_loc(struct ac_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
	uint32_t indirect_offset)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect_offset > 0;
	ud_info->indirect_offset = indirect_offset;
	*sgpr_idx += num_sgprs;
}

static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct ac_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];

	set_loc(ud_info, sgpr_idx, num_sgprs, 0);
}

static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     uint32_t indirect_offset)
{
	struct ac_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.descriptor_sets[idx];

	set_loc(ud_info, sgpr_idx, 2, indirect_offset);
}
struct user_sgpr_info {
	bool need_ring_offsets;
	uint8_t sgpr_count;
	bool indirect_all_descriptor_sets;
};

static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static unsigned
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	unsigned count = 0;
	count += ctx->shader_info->info.vs.has_vertex_buffers ? 2 : 0;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
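
/* Count the user SGPRs each stage needs and decide whether the used
 * descriptor sets still fit directly in SGPRs; if not, fall back to a single
 * indirection pointer and mark indirect_all_descriptor_sets.
 */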
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_info->sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_info->sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_info->sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
		if (ctx->options->key.vs.as_ls)
			user_sgpr_info->sgpr_count++;
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
			user_sgpr_info->sgpr_count++;
		}
		user_sgpr_info->sgpr_count += 4;
		break;
	case MESA_SHADER_TESS_EVAL:
		user_sgpr_info->sgpr_count += 1;
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
			}
			user_sgpr_info->sgpr_count++;
		}
		user_sgpr_info->sgpr_count += 2;
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_info->sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_info->sgpr_count += 2;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_info->sgpr_count;

	if (remaining_sgprs / 2 < util_bitcount(ctx->shader_info->info.desc_set_used_mask)) {
		user_sgpr_info->sgpr_count += 2;
		user_sgpr_info->indirect_all_descriptor_sets = true;
	} else {
		user_sgpr_info->sgpr_count += util_bitcount(ctx->shader_info->info.desc_set_used_mask) * 2;
	}
}
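
/* Declare the SGPR arguments shared by all stages: one pointer per used
 * descriptor set (or a single indirection pointer when they do not fit) plus
 * the push-constant pointer.
 */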
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}
}
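
/* Vertex-stage-only SGPRs: the vertex buffer descriptor table, base vertex,
 * start instance and (when the shader needs it) the draw id.
 */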
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
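
/* Once the argument list is fixed, record where each global input (descriptor
 * sets, push constants) ended up in the user SGPRs, mirroring
 * declare_global_input_sgprs(); with indirect descriptor sets the set
 * pointers are loaded through ac_build_load_to_sgpr() instead.
 */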
static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, 0);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
			       user_sgpr_idx, 2);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));
			} else
				ctx->descriptor_sets[i] = NULL;
		}
		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader(ctx, AC_UD_VS_VERTEX_BUFFERS,
				       user_sgpr_idx, 2);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
751 static void create_function(struct radv_shader_context
*ctx
,
752 gl_shader_stage stage
,
753 bool has_previous_stage
,
754 gl_shader_stage previous_stage
)
756 uint8_t user_sgpr_idx
;
757 struct user_sgpr_info user_sgpr_info
;
758 struct arg_info args
= {};
759 LLVMValueRef desc_sets
;
760 bool needs_view_index
= needs_view_index_sgpr(ctx
, stage
);
761 allocate_user_sgprs(ctx
, stage
, has_previous_stage
,
762 previous_stage
, needs_view_index
, &user_sgpr_info
);
764 if (user_sgpr_info
.need_ring_offsets
&& !ctx
->options
->supports_spill
) {
765 add_arg(&args
, ARG_SGPR
, ac_array_in_const_addr_space(ctx
->ac
.v4i32
),
770 case MESA_SHADER_COMPUTE
:
771 declare_global_input_sgprs(ctx
, stage
, has_previous_stage
,
772 previous_stage
, &user_sgpr_info
,
775 if (ctx
->shader_info
->info
.cs
.uses_grid_size
) {
776 add_arg(&args
, ARG_SGPR
, ctx
->ac
.v3i32
,
777 &ctx
->abi
.num_work_groups
);
780 for (int i
= 0; i
< 3; i
++) {
781 ctx
->abi
.workgroup_ids
[i
] = NULL
;
782 if (ctx
->shader_info
->info
.cs
.uses_block_id
[i
]) {
783 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
784 &ctx
->abi
.workgroup_ids
[i
]);
788 if (ctx
->shader_info
->info
.cs
.uses_local_invocation_idx
)
789 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->abi
.tg_size
);
790 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v3i32
,
791 &ctx
->abi
.local_invocation_ids
);
793 case MESA_SHADER_VERTEX
:
794 declare_global_input_sgprs(ctx
, stage
, has_previous_stage
,
795 previous_stage
, &user_sgpr_info
,
797 declare_vs_specific_input_sgprs(ctx
, stage
, has_previous_stage
,
798 previous_stage
, &args
);
800 if (needs_view_index
)
801 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
802 &ctx
->abi
.view_index
);
803 if (ctx
->options
->key
.vs
.as_es
)
804 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
806 else if (ctx
->options
->key
.vs
.as_ls
)
807 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
808 &ctx
->ls_out_layout
);
810 declare_vs_input_vgprs(ctx
, &args
);
812 case MESA_SHADER_TESS_CTRL
:
813 if (has_previous_stage
) {
814 // First 6 system regs
815 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->oc_lds
);
816 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
817 &ctx
->merged_wave_info
);
818 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
819 &ctx
->tess_factor_offset
);
821 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // scratch offset
822 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // unknown
823 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // unknown
825 declare_global_input_sgprs(ctx
, stage
,
828 &user_sgpr_info
, &args
,
830 declare_vs_specific_input_sgprs(ctx
, stage
,
832 previous_stage
, &args
);
834 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
835 &ctx
->ls_out_layout
);
837 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
838 &ctx
->tcs_offchip_layout
);
839 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
840 &ctx
->tcs_out_offsets
);
841 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
842 &ctx
->tcs_out_layout
);
843 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
844 &ctx
->tcs_in_layout
);
845 if (needs_view_index
)
846 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
847 &ctx
->abi
.view_index
);
849 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
850 &ctx
->abi
.tcs_patch_id
);
851 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
852 &ctx
->abi
.tcs_rel_ids
);
854 declare_vs_input_vgprs(ctx
, &args
);
856 declare_global_input_sgprs(ctx
, stage
,
859 &user_sgpr_info
, &args
,
862 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
863 &ctx
->tcs_offchip_layout
);
864 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
865 &ctx
->tcs_out_offsets
);
866 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
867 &ctx
->tcs_out_layout
);
868 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
869 &ctx
->tcs_in_layout
);
870 if (needs_view_index
)
871 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
872 &ctx
->abi
.view_index
);
874 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->oc_lds
);
875 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
876 &ctx
->tess_factor_offset
);
877 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
878 &ctx
->abi
.tcs_patch_id
);
879 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
880 &ctx
->abi
.tcs_rel_ids
);
883 case MESA_SHADER_TESS_EVAL
:
884 declare_global_input_sgprs(ctx
, stage
, has_previous_stage
,
885 previous_stage
, &user_sgpr_info
,
888 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->tcs_offchip_layout
);
889 if (needs_view_index
)
890 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
891 &ctx
->abi
.view_index
);
893 if (ctx
->options
->key
.tes
.as_es
) {
894 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->oc_lds
);
895 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
);
896 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
899 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
);
900 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->oc_lds
);
902 declare_tes_input_vgprs(ctx
, &args
);
904 case MESA_SHADER_GEOMETRY
:
905 if (has_previous_stage
) {
906 // First 6 system regs
907 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
909 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
910 &ctx
->merged_wave_info
);
911 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->oc_lds
);
913 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // scratch offset
914 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // unknown
915 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, NULL
); // unknown
917 declare_global_input_sgprs(ctx
, stage
,
920 &user_sgpr_info
, &args
,
923 if (previous_stage
== MESA_SHADER_TESS_EVAL
) {
924 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
925 &ctx
->tcs_offchip_layout
);
927 declare_vs_specific_input_sgprs(ctx
, stage
,
933 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
934 &ctx
->gsvs_ring_stride
);
935 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
936 &ctx
->gsvs_num_entries
);
937 if (needs_view_index
)
938 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
939 &ctx
->abi
.view_index
);
941 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
942 &ctx
->gs_vtx_offset
[0]);
943 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
944 &ctx
->gs_vtx_offset
[2]);
945 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
946 &ctx
->abi
.gs_prim_id
);
947 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
948 &ctx
->abi
.gs_invocation_id
);
949 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
950 &ctx
->gs_vtx_offset
[4]);
952 if (previous_stage
== MESA_SHADER_VERTEX
) {
953 declare_vs_input_vgprs(ctx
, &args
);
955 declare_tes_input_vgprs(ctx
, &args
);
958 declare_global_input_sgprs(ctx
, stage
,
961 &user_sgpr_info
, &args
,
964 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
965 &ctx
->gsvs_ring_stride
);
966 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
967 &ctx
->gsvs_num_entries
);
968 if (needs_view_index
)
969 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
970 &ctx
->abi
.view_index
);
972 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->gs2vs_offset
);
973 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->gs_wave_id
);
974 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
975 &ctx
->gs_vtx_offset
[0]);
976 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
977 &ctx
->gs_vtx_offset
[1]);
978 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
979 &ctx
->abi
.gs_prim_id
);
980 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
981 &ctx
->gs_vtx_offset
[2]);
982 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
983 &ctx
->gs_vtx_offset
[3]);
984 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
985 &ctx
->gs_vtx_offset
[4]);
986 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
987 &ctx
->gs_vtx_offset
[5]);
988 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
,
989 &ctx
->abi
.gs_invocation_id
);
992 case MESA_SHADER_FRAGMENT
:
993 declare_global_input_sgprs(ctx
, stage
, has_previous_stage
,
994 previous_stage
, &user_sgpr_info
,
997 if (ctx
->shader_info
->info
.ps
.needs_sample_positions
)
998 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
,
999 &ctx
->sample_pos_offset
);
1001 add_arg(&args
, ARG_SGPR
, ctx
->ac
.i32
, &ctx
->abi
.prim_mask
);
1002 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->persp_sample
);
1003 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->persp_center
);
1004 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->persp_centroid
);
1005 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v3i32
, NULL
); /* persp pull model */
1006 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->linear_sample
);
1007 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->linear_center
);
1008 add_arg(&args
, ARG_VGPR
, ctx
->ac
.v2i32
, &ctx
->linear_centroid
);
1009 add_arg(&args
, ARG_VGPR
, ctx
->ac
.f32
, NULL
); /* line stipple tex */
1010 add_arg(&args
, ARG_VGPR
, ctx
->ac
.f32
, &ctx
->abi
.frag_pos
[0]);
1011 add_arg(&args
, ARG_VGPR
, ctx
->ac
.f32
, &ctx
->abi
.frag_pos
[1]);
1012 add_arg(&args
, ARG_VGPR
, ctx
->ac
.f32
, &ctx
->abi
.frag_pos
[2]);
1013 add_arg(&args
, ARG_VGPR
, ctx
->ac
.f32
, &ctx
->abi
.frag_pos
[3]);
1014 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
, &ctx
->abi
.front_face
);
1015 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
, &ctx
->abi
.ancillary
);
1016 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
, &ctx
->abi
.sample_coverage
);
1017 add_arg(&args
, ARG_VGPR
, ctx
->ac
.i32
, NULL
); /* fixed pt */
1020 unreachable("Shader stage not implemented");
1023 ctx
->main_function
= create_llvm_function(
1024 ctx
->context
, ctx
->ac
.module
, ctx
->ac
.builder
, NULL
, 0, &args
,
1025 ctx
->max_workgroup_size
,
1026 ctx
->options
->unsafe_math
);
1027 set_llvm_calling_convention(ctx
->main_function
, stage
);
1030 ctx
->shader_info
->num_input_vgprs
= 0;
1031 ctx
->shader_info
->num_input_sgprs
= ctx
->options
->supports_spill
? 2 : 0;
1033 ctx
->shader_info
->num_input_sgprs
+= args
.num_sgprs_used
;
1035 if (ctx
->stage
!= MESA_SHADER_FRAGMENT
)
1036 ctx
->shader_info
->num_input_vgprs
= args
.num_vgprs_used
;
1038 assign_arguments(ctx
->main_function
, &args
);
1042 if (ctx
->options
->supports_spill
|| user_sgpr_info
.need_ring_offsets
) {
1043 set_loc_shader(ctx
, AC_UD_SCRATCH_RING_OFFSETS
,
1045 if (ctx
->options
->supports_spill
) {
1046 ctx
->ring_offsets
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.implicit.buffer.ptr",
1047 LLVMPointerType(ctx
->ac
.i8
, AC_CONST_ADDR_SPACE
),
1048 NULL
, 0, AC_FUNC_ATTR_READNONE
);
1049 ctx
->ring_offsets
= LLVMBuildBitCast(ctx
->ac
.builder
, ctx
->ring_offsets
,
1050 ac_array_in_const_addr_space(ctx
->ac
.v4i32
), "");
1054 /* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
1055 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
1056 if (has_previous_stage
)
1059 set_global_input_locs(ctx
, stage
, has_previous_stage
, previous_stage
,
1060 &user_sgpr_info
, desc_sets
, &user_sgpr_idx
);
1063 case MESA_SHADER_COMPUTE
:
1064 if (ctx
->shader_info
->info
.cs
.uses_grid_size
) {
1065 set_loc_shader(ctx
, AC_UD_CS_GRID_SIZE
,
1069 case MESA_SHADER_VERTEX
:
1070 set_vs_specific_input_locs(ctx
, stage
, has_previous_stage
,
1071 previous_stage
, &user_sgpr_idx
);
1072 if (ctx
->abi
.view_index
)
1073 set_loc_shader(ctx
, AC_UD_VIEW_INDEX
, &user_sgpr_idx
, 1);
1074 if (ctx
->options
->key
.vs
.as_ls
) {
1075 set_loc_shader(ctx
, AC_UD_VS_LS_TCS_IN_LAYOUT
,
1079 case MESA_SHADER_TESS_CTRL
:
1080 set_vs_specific_input_locs(ctx
, stage
, has_previous_stage
,
1081 previous_stage
, &user_sgpr_idx
);
1082 if (has_previous_stage
)
1083 set_loc_shader(ctx
, AC_UD_VS_LS_TCS_IN_LAYOUT
,
1085 set_loc_shader(ctx
, AC_UD_TCS_OFFCHIP_LAYOUT
, &user_sgpr_idx
, 4);
1086 if (ctx
->abi
.view_index
)
1087 set_loc_shader(ctx
, AC_UD_VIEW_INDEX
, &user_sgpr_idx
, 1);
1089 case MESA_SHADER_TESS_EVAL
:
1090 set_loc_shader(ctx
, AC_UD_TES_OFFCHIP_LAYOUT
, &user_sgpr_idx
, 1);
1091 if (ctx
->abi
.view_index
)
1092 set_loc_shader(ctx
, AC_UD_VIEW_INDEX
, &user_sgpr_idx
, 1);
1094 case MESA_SHADER_GEOMETRY
:
1095 if (has_previous_stage
) {
1096 if (previous_stage
== MESA_SHADER_VERTEX
)
1097 set_vs_specific_input_locs(ctx
, stage
,
1102 set_loc_shader(ctx
, AC_UD_TES_OFFCHIP_LAYOUT
,
1105 set_loc_shader(ctx
, AC_UD_GS_VS_RING_STRIDE_ENTRIES
,
1107 if (ctx
->abi
.view_index
)
1108 set_loc_shader(ctx
, AC_UD_VIEW_INDEX
, &user_sgpr_idx
, 1);
1110 case MESA_SHADER_FRAGMENT
:
1111 if (ctx
->shader_info
->info
.ps
.needs_sample_positions
) {
1112 set_loc_shader(ctx
, AC_UD_PS_SAMPLE_POS_OFFSET
,
1117 unreachable("Shader stage not implemented");
1120 if (stage
== MESA_SHADER_TESS_CTRL
||
1121 (stage
== MESA_SHADER_VERTEX
&& ctx
->options
->key
.vs
.as_ls
) ||
1122 /* GFX9 has the ESGS ring buffer in LDS. */
1123 (stage
== MESA_SHADER_GEOMETRY
&& has_previous_stage
)) {
1124 ac_declare_lds_as_pointer(&ctx
->ac
);
1127 ctx
->shader_info
->num_user_sgprs
= user_sgpr_idx
;
static void
build_store_values_extended(struct ac_llvm_context *ac,
			    LLVMValueRef *values,
			    unsigned value_count,
			    unsigned value_stride,
			    LLVMValueRef vec)
{
	LLVMBuilderRef builder = ac->builder;
	unsigned i;

	for (i = 0; i < value_count; i++) {
		LLVMValueRef ptr = values[i * value_stride];
		LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
		LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
		LLVMBuildStore(builder, value, ptr);
	}
}
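
/* NIR SSA defs and blocks are tracked in hash tables on ac_nir_context; the
 * helpers below look up the LLVM value or basic block that was recorded for
 * a given def/block and build the matching LLVM type for an SSA def.
 */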
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
				const nir_ssa_def *def)
{
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
	if (def->num_components > 1) {
		type = LLVMVectorType(type, def->num_components);
	}
	return type;
}

static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, src.ssa);
	return (LLVMValueRef)entry->data;
}
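
/* Shared-memory accesses go through the shader's LDS allocation: index
 * ctx->ac.lds with the NIR source value and cast the result to an i32
 * pointer in the same address space.
 */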
static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src)
{
	LLVMValueRef ptr = get_src(ctx, src);
	ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
	int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

	return LLVMBuildBitCast(ctx->ac.builder, ptr,
				LLVMPointerType(ctx->ac.i32, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
				   const struct nir_block *b)
{
	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
	return (LLVMBasicBlockRef)entry->data;
}
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
				nir_alu_src src,
				unsigned num_components)
{
	LLVMValueRef value = get_src(ctx, src.src);
	bool need_swizzle = false;

	unsigned src_components = ac_get_llvm_num_components(value);
	for (unsigned i = 0; i < num_components; ++i) {
		assert(src.swizzle[i] < src_components);
		if (src.swizzle[i] != i)
			need_swizzle = true;
	}

	if (need_swizzle || num_components != src_components) {
		LLVMValueRef masks[] = {
			LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

		if (src_components > 1 && num_components == 1) {
			value = LLVMBuildExtractElement(ctx->ac.builder, value,
							masks[0], "");
		} else if (src_components == 1 && num_components > 1) {
			LLVMValueRef values[] = {value, value, value, value};
			value = ac_build_gather_values(&ctx->ac, values, num_components);
		} else {
			LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
			value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
						       swizzle, "");
		}
	}
	assert(!src.negate);
	return value;
}
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
				 LLVMIntPredicate pred, LLVMValueRef src0,
				 LLVMValueRef src1)
{
	LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}

static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
				   LLVMRealPredicate pred, LLVMValueRef src0,
				   LLVMValueRef src1)
{
	LLVMValueRef result;
	src0 = ac_to_float(ctx, src0);
	src1 = ac_to_float(ctx, src1);
	result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}
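
/* The emit_intrin_*_param() helpers build the overloaded intrinsic name by
 * appending the element size of the result type (".f16"/".f32"/".f64") to
 * the base name and emit a readnone call with the float-cast operands.
 */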
static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
	};

	MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
						 ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
	};

	MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
						 ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
		ac_to_float(ctx, src2),
	};

	MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
						 ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
1296 static LLVMValueRef
emit_bcsel(struct ac_llvm_context
*ctx
,
1297 LLVMValueRef src0
, LLVMValueRef src1
, LLVMValueRef src2
)
1299 LLVMValueRef v
= LLVMBuildICmp(ctx
->builder
, LLVMIntNE
, src0
,
1301 return LLVMBuildSelect(ctx
->builder
, v
, ac_to_integer(ctx
, src1
),
1302 ac_to_integer(ctx
, src2
), "");
1305 static LLVMValueRef
emit_minmax_int(struct ac_llvm_context
*ctx
,
1306 LLVMIntPredicate pred
,
1307 LLVMValueRef src0
, LLVMValueRef src1
)
1309 return LLVMBuildSelect(ctx
->builder
,
1310 LLVMBuildICmp(ctx
->builder
, pred
, src0
, src1
, ""),
1315 static LLVMValueRef
emit_iabs(struct ac_llvm_context
*ctx
,
1318 return emit_minmax_int(ctx
, LLVMIntSGT
, src0
,
1319 LLVMBuildNeg(ctx
->builder
, src0
, ""));
1322 static LLVMValueRef
emit_uint_carry(struct ac_llvm_context
*ctx
,
1324 LLVMValueRef src0
, LLVMValueRef src1
)
1326 LLVMTypeRef ret_type
;
1327 LLVMTypeRef types
[] = { ctx
->i32
, ctx
->i1
};
1329 LLVMValueRef params
[] = { src0
, src1
};
1330 ret_type
= LLVMStructTypeInContext(ctx
->context
, types
,
1333 res
= ac_build_intrinsic(ctx
, intrin
, ret_type
,
1334 params
, 2, AC_FUNC_ATTR_READNONE
);
1336 res
= LLVMBuildExtractValue(ctx
->builder
, res
, 1, "");
1337 res
= LLVMBuildZExt(ctx
->builder
, res
, ctx
->i32
, "");
1341 static LLVMValueRef
emit_b2f(struct ac_llvm_context
*ctx
,
1344 return LLVMBuildAnd(ctx
->builder
, src0
, LLVMBuildBitCast(ctx
->builder
, LLVMConstReal(ctx
->f32
, 1.0), ctx
->i32
, ""), "");
1347 static LLVMValueRef
emit_f2b(struct ac_llvm_context
*ctx
,
1350 src0
= ac_to_float(ctx
, src0
);
1351 LLVMValueRef zero
= LLVMConstNull(LLVMTypeOf(src0
));
1352 return LLVMBuildSExt(ctx
->builder
,
1353 LLVMBuildFCmp(ctx
->builder
, LLVMRealUNE
, src0
, zero
, ""),
1357 static LLVMValueRef
emit_b2i(struct ac_llvm_context
*ctx
,
1361 LLVMValueRef result
= LLVMBuildAnd(ctx
->builder
, src0
, ctx
->i32_1
, "");
1366 return LLVMBuildZExt(ctx
->builder
, result
, ctx
->i64
, "");
1369 static LLVMValueRef
emit_i2b(struct ac_llvm_context
*ctx
,
1372 LLVMValueRef zero
= LLVMConstNull(LLVMTypeOf(src0
));
1373 return LLVMBuildSExt(ctx
->builder
,
1374 LLVMBuildICmp(ctx
->builder
, LLVMIntNE
, src0
, zero
, ""),
1378 static LLVMValueRef
emit_f2f16(struct ac_llvm_context
*ctx
,
1381 LLVMValueRef result
;
1382 LLVMValueRef cond
= NULL
;
1384 src0
= ac_to_float(ctx
, src0
);
1385 result
= LLVMBuildFPTrunc(ctx
->builder
, src0
, ctx
->f16
, "");
1387 if (ctx
->chip_class
>= VI
) {
1388 LLVMValueRef args
[2];
1389 /* Check if the result is a denormal - and flush to 0 if so. */
1391 args
[1] = LLVMConstInt(ctx
->i32
, N_SUBNORMAL
| P_SUBNORMAL
, false);
1392 cond
= ac_build_intrinsic(ctx
, "llvm.amdgcn.class.f16", ctx
->i1
, args
, 2, AC_FUNC_ATTR_READNONE
);
1395 /* need to convert back up to f32 */
1396 result
= LLVMBuildFPExt(ctx
->builder
, result
, ctx
->f32
, "");
1398 if (ctx
->chip_class
>= VI
)
1399 result
= LLVMBuildSelect(ctx
->builder
, cond
, ctx
->f32_0
, result
, "");
1402 /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
1403 * so compare the result and flush to 0 if it's smaller.
1405 LLVMValueRef temp
, cond2
;
1406 temp
= emit_intrin_1f_param(ctx
, "llvm.fabs", ctx
->f32
, result
);
1407 cond
= LLVMBuildFCmp(ctx
->builder
, LLVMRealUGT
,
1408 LLVMBuildBitCast(ctx
->builder
, LLVMConstInt(ctx
->i32
, 0x38800000, false), ctx
->f32
, ""),
1410 cond2
= LLVMBuildFCmp(ctx
->builder
, LLVMRealUNE
,
1411 temp
, ctx
->f32_0
, "");
1412 cond
= LLVMBuildAnd(ctx
->builder
, cond
, cond2
, "");
1413 result
= LLVMBuildSelect(ctx
->builder
, cond
, ctx
->f32_0
, result
, "");
1418 static LLVMValueRef
emit_umul_high(struct ac_llvm_context
*ctx
,
1419 LLVMValueRef src0
, LLVMValueRef src1
)
1421 LLVMValueRef dst64
, result
;
1422 src0
= LLVMBuildZExt(ctx
->builder
, src0
, ctx
->i64
, "");
1423 src1
= LLVMBuildZExt(ctx
->builder
, src1
, ctx
->i64
, "");
1425 dst64
= LLVMBuildMul(ctx
->builder
, src0
, src1
, "");
1426 dst64
= LLVMBuildLShr(ctx
->builder
, dst64
, LLVMConstInt(ctx
->i64
, 32, false), "");
1427 result
= LLVMBuildTrunc(ctx
->builder
, dst64
, ctx
->i32
, "");
1431 static LLVMValueRef
emit_imul_high(struct ac_llvm_context
*ctx
,
1432 LLVMValueRef src0
, LLVMValueRef src1
)
1434 LLVMValueRef dst64
, result
;
1435 src0
= LLVMBuildSExt(ctx
->builder
, src0
, ctx
->i64
, "");
1436 src1
= LLVMBuildSExt(ctx
->builder
, src1
, ctx
->i64
, "");
1438 dst64
= LLVMBuildMul(ctx
->builder
, src0
, src1
, "");
1439 dst64
= LLVMBuildAShr(ctx
->builder
, dst64
, LLVMConstInt(ctx
->i64
, 32, false), "");
1440 result
= LLVMBuildTrunc(ctx
->builder
, dst64
, ctx
->i32
, "");
1444 static LLVMValueRef
emit_bitfield_extract(struct ac_llvm_context
*ctx
,
1446 const LLVMValueRef srcs
[3])
1448 LLVMValueRef result
;
1449 LLVMValueRef icond
= LLVMBuildICmp(ctx
->builder
, LLVMIntEQ
, srcs
[2], LLVMConstInt(ctx
->i32
, 32, false), "");
1451 result
= ac_build_bfe(ctx
, srcs
[0], srcs
[1], srcs
[2], is_signed
);
1452 result
= LLVMBuildSelect(ctx
->builder
, icond
, srcs
[0], result
, "");
1456 static LLVMValueRef
emit_bitfield_insert(struct ac_llvm_context
*ctx
,
1457 LLVMValueRef src0
, LLVMValueRef src1
,
1458 LLVMValueRef src2
, LLVMValueRef src3
)
1460 LLVMValueRef bfi_args
[3], result
;
1462 bfi_args
[0] = LLVMBuildShl(ctx
->builder
,
1463 LLVMBuildSub(ctx
->builder
,
1464 LLVMBuildShl(ctx
->builder
,
1469 bfi_args
[1] = LLVMBuildShl(ctx
->builder
, src1
, src2
, "");
1472 LLVMValueRef icond
= LLVMBuildICmp(ctx
->builder
, LLVMIntEQ
, src3
, LLVMConstInt(ctx
->i32
, 32, false), "");
1475 * (arg0 & arg1) | (~arg0 & arg2) = arg2 ^ (arg0 & (arg1 ^ arg2)
1476 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
1478 result
= LLVMBuildXor(ctx
->builder
, bfi_args
[2],
1479 LLVMBuildAnd(ctx
->builder
, bfi_args
[0],
1480 LLVMBuildXor(ctx
->builder
, bfi_args
[1], bfi_args
[2], ""), ""), "");
1482 result
= LLVMBuildSelect(ctx
->builder
, icond
, src1
, result
, "");
1486 static LLVMValueRef
emit_pack_half_2x16(struct ac_llvm_context
*ctx
,
1489 LLVMValueRef comp
[2];
1491 src0
= ac_to_float(ctx
, src0
);
1492 comp
[0] = LLVMBuildExtractElement(ctx
->builder
, src0
, ctx
->i32_0
, "");
1493 comp
[1] = LLVMBuildExtractElement(ctx
->builder
, src0
, ctx
->i32_1
, "");
1495 return ac_build_cvt_pkrtz_f16(ctx
, comp
);
1498 static LLVMValueRef
emit_unpack_half_2x16(struct ac_llvm_context
*ctx
,
1501 LLVMValueRef const16
= LLVMConstInt(ctx
->i32
, 16, false);
1502 LLVMValueRef temps
[2], result
, val
;
1505 for (i
= 0; i
< 2; i
++) {
1506 val
= i
== 1 ? LLVMBuildLShr(ctx
->builder
, src0
, const16
, "") : src0
;
1507 val
= LLVMBuildTrunc(ctx
->builder
, val
, ctx
->i16
, "");
1508 val
= LLVMBuildBitCast(ctx
->builder
, val
, ctx
->f16
, "");
1509 temps
[i
] = LLVMBuildFPExt(ctx
->builder
, val
, ctx
->f32
, "");
1512 result
= LLVMBuildInsertElement(ctx
->builder
, LLVMGetUndef(ctx
->v2f32
), temps
[0],
1514 result
= LLVMBuildInsertElement(ctx
->builder
, result
, temps
[1],
1519 static LLVMValueRef
emit_ddxy(struct ac_nir_context
*ctx
,
1525 LLVMValueRef result
;
1527 if (op
== nir_op_fddx_fine
)
1528 mask
= AC_TID_MASK_LEFT
;
1529 else if (op
== nir_op_fddy_fine
)
1530 mask
= AC_TID_MASK_TOP
;
1532 mask
= AC_TID_MASK_TOP_LEFT
;
1534 /* for DDX we want to next X pixel, DDY next Y pixel. */
1535 if (op
== nir_op_fddx_fine
||
1536 op
== nir_op_fddx_coarse
||
1542 result
= ac_build_ddxy(&ctx
->ac
, mask
, idx
, src0
);
1547 * this takes an I,J coordinate pair,
1548 * and works out the X and Y derivatives.
1549 * it returns DDX(I), DDX(J), DDY(I), DDY(J).
1551 static LLVMValueRef
emit_ddxy_interp(
1552 struct ac_nir_context
*ctx
,
1553 LLVMValueRef interp_ij
)
1555 LLVMValueRef result
[4], a
;
1558 for (i
= 0; i
< 2; i
++) {
1559 a
= LLVMBuildExtractElement(ctx
->ac
.builder
, interp_ij
,
1560 LLVMConstInt(ctx
->ac
.i32
, i
, false), "");
1561 result
[i
] = emit_ddxy(ctx
, nir_op_fddx
, a
);
1562 result
[2+i
] = emit_ddxy(ctx
, nir_op_fddy
, a
);
1564 return ac_build_gather_values(&ctx
->ac
, result
, 4);
1567 static void visit_alu(struct ac_nir_context
*ctx
, const nir_alu_instr
*instr
)
1569 LLVMValueRef src
[4], result
= NULL
;
1570 unsigned num_components
= instr
->dest
.dest
.ssa
.num_components
;
1571 unsigned src_components
;
1572 LLVMTypeRef def_type
= get_def_type(ctx
, &instr
->dest
.dest
.ssa
);
1574 assert(nir_op_infos
[instr
->op
].num_inputs
<= ARRAY_SIZE(src
));
1575 switch (instr
->op
) {
1581 case nir_op_pack_half_2x16
:
1584 case nir_op_unpack_half_2x16
:
1587 case nir_op_cube_face_coord
:
1588 case nir_op_cube_face_index
:
1592 src_components
= num_components
;
1595 for (unsigned i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
1596 src
[i
] = get_alu_src(ctx
, instr
->src
[i
], src_components
);
1598 switch (instr
->op
) {
1604 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1605 result
= LLVMBuildFNeg(ctx
->ac
.builder
, src
[0], "");
1608 result
= LLVMBuildNeg(ctx
->ac
.builder
, src
[0], "");
1611 result
= LLVMBuildNot(ctx
->ac
.builder
, src
[0], "");
1614 result
= LLVMBuildAdd(ctx
->ac
.builder
, src
[0], src
[1], "");
1617 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1618 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1619 result
= LLVMBuildFAdd(ctx
->ac
.builder
, src
[0], src
[1], "");
1622 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1623 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1624 result
= LLVMBuildFSub(ctx
->ac
.builder
, src
[0], src
[1], "");
1627 result
= LLVMBuildSub(ctx
->ac
.builder
, src
[0], src
[1], "");
1630 result
= LLVMBuildMul(ctx
->ac
.builder
, src
[0], src
[1], "");
1633 result
= LLVMBuildSRem(ctx
->ac
.builder
, src
[0], src
[1], "");
1636 result
= LLVMBuildURem(ctx
->ac
.builder
, src
[0], src
[1], "");
1639 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1640 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1641 result
= ac_build_fdiv(&ctx
->ac
, src
[0], src
[1]);
1642 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.floor",
1643 ac_to_float_type(&ctx
->ac
, def_type
), result
);
1644 result
= LLVMBuildFMul(ctx
->ac
.builder
, src
[1] , result
, "");
1645 result
= LLVMBuildFSub(ctx
->ac
.builder
, src
[0], result
, "");
1648 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1649 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1650 result
= LLVMBuildFRem(ctx
->ac
.builder
, src
[0], src
[1], "");
1653 result
= LLVMBuildSRem(ctx
->ac
.builder
, src
[0], src
[1], "");
1656 result
= LLVMBuildSDiv(ctx
->ac
.builder
, src
[0], src
[1], "");
1659 result
= LLVMBuildUDiv(ctx
->ac
.builder
, src
[0], src
[1], "");
1662 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1663 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1664 result
= LLVMBuildFMul(ctx
->ac
.builder
, src
[0], src
[1], "");
1667 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1668 result
= ac_build_fdiv(&ctx
->ac
, instr
->dest
.dest
.ssa
.bit_size
== 32 ? ctx
->ac
.f32_1
: ctx
->ac
.f64_1
,
1672 result
= LLVMBuildAnd(ctx
->ac
.builder
, src
[0], src
[1], "");
1675 result
= LLVMBuildOr(ctx
->ac
.builder
, src
[0], src
[1], "");
1678 result
= LLVMBuildXor(ctx
->ac
.builder
, src
[0], src
[1], "");
1681 result
= LLVMBuildShl(ctx
->ac
.builder
, src
[0],
1682 LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
1683 LLVMTypeOf(src
[0]), ""),
1687 result
= LLVMBuildAShr(ctx
->ac
.builder
, src
[0],
1688 LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
1689 LLVMTypeOf(src
[0]), ""),
1693 result
= LLVMBuildLShr(ctx
->ac
.builder
, src
[0],
1694 LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
1695 LLVMTypeOf(src
[0]), ""),
1699 result
= emit_int_cmp(&ctx
->ac
, LLVMIntSLT
, src
[0], src
[1]);
1702 result
= emit_int_cmp(&ctx
->ac
, LLVMIntNE
, src
[0], src
[1]);
1705 result
= emit_int_cmp(&ctx
->ac
, LLVMIntEQ
, src
[0], src
[1]);
1708 result
= emit_int_cmp(&ctx
->ac
, LLVMIntSGE
, src
[0], src
[1]);
1711 result
= emit_int_cmp(&ctx
->ac
, LLVMIntULT
, src
[0], src
[1]);
1714 result
= emit_int_cmp(&ctx
->ac
, LLVMIntUGE
, src
[0], src
[1]);
1717 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOEQ
, src
[0], src
[1]);
1720 result
= emit_float_cmp(&ctx
->ac
, LLVMRealUNE
, src
[0], src
[1]);
1723 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOLT
, src
[0], src
[1]);
1726 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOGE
, src
[0], src
[1]);
1729 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.fabs",
1730 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1733 result
= emit_iabs(&ctx
->ac
, src
[0]);
1736 result
= emit_minmax_int(&ctx
->ac
, LLVMIntSGT
, src
[0], src
[1]);
1739 result
= emit_minmax_int(&ctx
->ac
, LLVMIntSLT
, src
[0], src
[1]);
1742 result
= emit_minmax_int(&ctx
->ac
, LLVMIntUGT
, src
[0], src
[1]);
1745 result
= emit_minmax_int(&ctx
->ac
, LLVMIntULT
, src
[0], src
[1]);
1748 result
= ac_build_isign(&ctx
->ac
, src
[0],
1749 instr
->dest
.dest
.ssa
.bit_size
);
1752 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1753 result
= ac_build_fsign(&ctx
->ac
, src
[0],
1754 instr
->dest
.dest
.ssa
.bit_size
);
1757 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.floor",
1758 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1761 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.trunc",
1762 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1765 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.ceil",
1766 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1768 case nir_op_fround_even
:
1769 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.rint",
1770 ac_to_float_type(&ctx
->ac
, def_type
),src
[0]);
1773 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1774 result
= ac_build_fract(&ctx
->ac
, src
[0],
1775 instr
->dest
.dest
.ssa
.bit_size
);
1778 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.sin",
1779 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1782 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.cos",
1783 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1786 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.sqrt",
1787 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1790 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.exp2",
1791 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1794 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.log2",
1795 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1798 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.sqrt",
1799 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
1800 result
= ac_build_fdiv(&ctx
->ac
, instr
->dest
.dest
.ssa
.bit_size
== 32 ? ctx
->ac
.f32_1
: ctx
->ac
.f64_1
,
1804 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.maxnum",
1805 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
1806 if (ctx
->ac
.chip_class
< GFX9
&&
1807 instr
->dest
.dest
.ssa
.bit_size
== 32) {
1808 /* Only pre-GFX9 chips do not flush denorms. */
1809 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.canonicalize",
1810 ac_to_float_type(&ctx
->ac
, def_type
),
1815 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.minnum",
1816 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
1817 if (ctx
->ac
.chip_class
< GFX9
&&
1818 instr
->dest
.dest
.ssa
.bit_size
== 32) {
1819 /* Only pre-GFX9 chips do not flush denorms. */
1820 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.canonicalize",
1821 ac_to_float_type(&ctx
->ac
, def_type
),
1826 result
= emit_intrin_3f_param(&ctx
->ac
, "llvm.fmuladd",
1827 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1], src
[2]);
1830 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1831 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) == 32)
1832 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.ldexp.f32", ctx
->ac
.f32
, src
, 2, AC_FUNC_ATTR_READNONE
);
1834 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.ldexp.f64", ctx
->ac
.f64
, src
, 2, AC_FUNC_ATTR_READNONE
);
1836 case nir_op_ibitfield_extract
:
1837 result
= emit_bitfield_extract(&ctx
->ac
, true, src
);
1839 case nir_op_ubitfield_extract
:
1840 result
= emit_bitfield_extract(&ctx
->ac
, false, src
);
1842 case nir_op_bitfield_insert
:
1843 result
= emit_bitfield_insert(&ctx
->ac
, src
[0], src
[1], src
[2], src
[3]);
1845 case nir_op_bitfield_reverse
:
1846 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.bitreverse.i32", ctx
->ac
.i32
, src
, 1, AC_FUNC_ATTR_READNONE
);
1848 case nir_op_bit_count
:
1849 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) == 32)
1850 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.ctpop.i32", ctx
->ac
.i32
, src
, 1, AC_FUNC_ATTR_READNONE
);
1852 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.ctpop.i64", ctx
->ac
.i64
, src
, 1, AC_FUNC_ATTR_READNONE
);
1853 result
= LLVMBuildTrunc(ctx
->ac
.builder
, result
, ctx
->ac
.i32
, "");
1859 for (unsigned i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
1860 src
[i
] = ac_to_integer(&ctx
->ac
, src
[i
]);
1861 result
= ac_build_gather_values(&ctx
->ac
, src
, num_components
);
1865 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1866 result
= LLVMBuildFPToSI(ctx
->ac
.builder
, src
[0], def_type
, "");
1870 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1871 result
= LLVMBuildFPToUI(ctx
->ac
.builder
, src
[0], def_type
, "");
1875 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1876 result
= LLVMBuildSIToFP(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
1880 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1881 result
= LLVMBuildUIToFP(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
1884 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1885 result
= LLVMBuildFPExt(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
1888 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1889 result
= LLVMBuildFPTrunc(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
1893 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1894 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) < ac_get_elem_bits(&ctx
->ac
, def_type
))
1895 result
= LLVMBuildZExt(ctx
->ac
.builder
, src
[0], def_type
, "");
1897 result
= LLVMBuildTrunc(ctx
->ac
.builder
, src
[0], def_type
, "");
1901 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1902 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) < ac_get_elem_bits(&ctx
->ac
, def_type
))
1903 result
= LLVMBuildSExt(ctx
->ac
.builder
, src
[0], def_type
, "");
1905 result
= LLVMBuildTrunc(ctx
->ac
.builder
, src
[0], def_type
, "");
1908 result
= emit_bcsel(&ctx
->ac
, src
[0], src
[1], src
[2]);
1910 case nir_op_find_lsb
:
1911 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1912 result
= ac_find_lsb(&ctx
->ac
, ctx
->ac
.i32
, src
[0]);
1914 case nir_op_ufind_msb
:
1915 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1916 result
= ac_build_umsb(&ctx
->ac
, src
[0], ctx
->ac
.i32
);
1918 case nir_op_ifind_msb
:
1919 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1920 result
= ac_build_imsb(&ctx
->ac
, src
[0], ctx
->ac
.i32
);
1922 case nir_op_uadd_carry
:
1923 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1924 src
[1] = ac_to_integer(&ctx
->ac
, src
[1]);
1925 result
= emit_uint_carry(&ctx
->ac
, "llvm.uadd.with.overflow.i32", src
[0], src
[1]);
1927 case nir_op_usub_borrow
:
1928 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1929 src
[1] = ac_to_integer(&ctx
->ac
, src
[1]);
1930 result
= emit_uint_carry(&ctx
->ac
, "llvm.usub.with.overflow.i32", src
[0], src
[1]);
1933 result
= emit_b2f(&ctx
->ac
, src
[0]);
1936 result
= emit_f2b(&ctx
->ac
, src
[0]);
1939 result
= emit_b2i(&ctx
->ac
, src
[0], instr
->dest
.dest
.ssa
.bit_size
);
1942 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1943 result
= emit_i2b(&ctx
->ac
, src
[0]);
1945 case nir_op_fquantize2f16
:
1946 result
= emit_f2f16(&ctx
->ac
, src
[0]);
1948 case nir_op_umul_high
:
1949 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1950 src
[1] = ac_to_integer(&ctx
->ac
, src
[1]);
1951 result
= emit_umul_high(&ctx
->ac
, src
[0], src
[1]);
1953 case nir_op_imul_high
:
1954 src
[0] = ac_to_integer(&ctx
->ac
, src
[0]);
1955 src
[1] = ac_to_integer(&ctx
->ac
, src
[1]);
1956 result
= emit_imul_high(&ctx
->ac
, src
[0], src
[1]);
1958 case nir_op_pack_half_2x16
:
1959 result
= emit_pack_half_2x16(&ctx
->ac
, src
[0]);
1961 case nir_op_unpack_half_2x16
:
1962 result
= emit_unpack_half_2x16(&ctx
->ac
, src
[0]);
1966 case nir_op_fddx_fine
:
1967 case nir_op_fddy_fine
:
1968 case nir_op_fddx_coarse
:
1969 case nir_op_fddy_coarse
:
1970 result
= emit_ddxy(ctx
, instr
->op
, src
[0]);
1973 case nir_op_unpack_64_2x32_split_x
: {
1974 assert(ac_get_llvm_num_components(src
[0]) == 1);
1975 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1978 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1983 case nir_op_unpack_64_2x32_split_y
: {
1984 assert(ac_get_llvm_num_components(src
[0]) == 1);
1985 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1988 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1993 case nir_op_pack_64_2x32_split
: {
1994 LLVMValueRef tmp
= LLVMGetUndef(ctx
->ac
.v2i32
);
1995 tmp
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp
,
1996 src
[0], ctx
->ac
.i32_0
, "");
1997 tmp
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp
,
1998 src
[1], ctx
->ac
.i32_1
, "");
1999 result
= LLVMBuildBitCast(ctx
->ac
.builder
, tmp
, ctx
->ac
.i64
, "");
2003 case nir_op_cube_face_coord
: {
2004 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
2005 LLVMValueRef results
[2];
2007 for (unsigned chan
= 0; chan
< 3; chan
++)
2008 in
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src
[0], chan
);
2009 results
[0] = ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubetc",
2010 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
2011 results
[1] = ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubesc",
2012 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
2013 result
= ac_build_gather_values(&ctx
->ac
, results
, 2);
2017 case nir_op_cube_face_index
: {
2018 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
2020 for (unsigned chan
= 0; chan
< 3; chan
++)
2021 in
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src
[0], chan
);
2022 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubeid",
2023 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
	default:
		fprintf(stderr, "Unknown NIR alu instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}

	if (result) {
		assert(instr->dest.dest.is_ssa);
		result = ac_to_integer(&ctx->ac, result);
		_mesa_hash_table_insert(ctx->defs, &instr->dest.dest.ssa,
					result);
	}
}
static void visit_load_const(struct ac_nir_context *ctx,
			     const nir_load_const_instr *instr)
{
	LLVMValueRef values[4], value = NULL;
	LLVMTypeRef element_type =
	    LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	for (unsigned i = 0; i < instr->def.num_components; ++i) {
		switch (instr->def.bit_size) {
		case 32:
			values[i] = LLVMConstInt(element_type,
						 instr->value.u32[i], false);
			break;
		case 64:
			values[i] = LLVMConstInt(element_type,
						 instr->value.u64[i], false);
			break;
		default:
			fprintf(stderr,
				"unsupported nir load_const bit_size: %d\n",
				instr->def.bit_size);
			abort();
		}
	}
	if (instr->def.num_components > 1) {
		value = LLVMConstVector(values, instr->def.num_components);
	} else
		value = values[0];

	_mesa_hash_table_insert(ctx->defs, &instr->def, value);
}
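/* Editor's note, a worked example of the mapping above (not a comment from the
 * original source): a NIR load_const of a 32-bit vec2 (1.0, 2.0) yields an i32
 * element_type and two LLVMConstInt values holding the float bit patterns
 * 0x3f800000 and 0x40000000, which are then combined with LLVMConstVector;
 * consumers bitcast back to float as needed.
 */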
static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
	LLVMValueRef size =
		LLVMBuildExtractElement(ctx->ac.builder, descriptor,
					LLVMConstInt(ctx->ac.i32, 2, false), "");

	if (ctx->ac.chip_class == VI && in_elements) {
		/* On VI, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(ctx->ac.builder, descriptor,
						ctx->ac.i32_1, "");
		stride = LLVMBuildLShr(ctx->ac.builder, stride,
				       LLVMConstInt(ctx->ac.i32, 16, false), "");
		stride = LLVMBuildAnd(ctx->ac.builder, stride,
				      LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

		size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
	}
	return size;
}
/*
 * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
 * intrinsic names).
 */
static void build_int_type_name(
	LLVMTypeRef type,
	char *buf, unsigned bufsize)
{
	assert(bufsize >= 6);

	if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
		snprintf(buf, bufsize, "v%ui32",
			 LLVMGetVectorSize(type));
	else
		strcpy(buf, "i32");
}
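/* Editor's note (not from the original source): a <4 x i32> coordinate type
 * produces the suffix "v4i32" and a scalar i32 produces "i32";
 * visit_image_atomic() below uses this to build intrinsic names such as
 * "llvm.amdgcn.image.atomic.add.v4i32".
 */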
2117 static LLVMValueRef
radv_lower_gather4_integer(struct ac_llvm_context
*ctx
,
2118 struct ac_image_args
*args
,
2119 const nir_tex_instr
*instr
)
2121 enum glsl_base_type stype
= glsl_get_sampler_result_type(instr
->texture
->var
->type
);
2122 LLVMValueRef coord
= args
->addr
;
2123 LLVMValueRef half_texel
[2];
2124 LLVMValueRef compare_cube_wa
= NULL
;
2125 LLVMValueRef result
;
2127 unsigned coord_vgpr_index
= (unsigned)args
->offset
+ (unsigned)args
->compare
;
2131 struct ac_image_args txq_args
= { 0 };
2133 txq_args
.da
= instr
->is_array
|| instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
;
2134 txq_args
.opcode
= ac_image_get_resinfo
;
2135 txq_args
.dmask
= 0xf;
2136 txq_args
.addr
= ctx
->i32_0
;
2137 txq_args
.resource
= args
->resource
;
2138 LLVMValueRef size
= ac_build_image_opcode(ctx
, &txq_args
);
2140 for (c
= 0; c
< 2; c
++) {
2141 half_texel
[c
] = LLVMBuildExtractElement(ctx
->builder
, size
,
2142 LLVMConstInt(ctx
->i32
, c
, false), "");
2143 half_texel
[c
] = LLVMBuildUIToFP(ctx
->builder
, half_texel
[c
], ctx
->f32
, "");
2144 half_texel
[c
] = ac_build_fdiv(ctx
, ctx
->f32_1
, half_texel
[c
]);
2145 half_texel
[c
] = LLVMBuildFMul(ctx
->builder
, half_texel
[c
],
2146 LLVMConstReal(ctx
->f32
, -0.5), "");
2150 LLVMValueRef orig_coords
= args
->addr
;
2152 for (c
= 0; c
< 2; c
++) {
2154 LLVMValueRef index
= LLVMConstInt(ctx
->i32
, coord_vgpr_index
+ c
, 0);
2155 tmp
= LLVMBuildExtractElement(ctx
->builder
, coord
, index
, "");
2156 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->f32
, "");
2157 tmp
= LLVMBuildFAdd(ctx
->builder
, tmp
, half_texel
[c
], "");
2158 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->i32
, "");
2159 coord
= LLVMBuildInsertElement(ctx
->builder
, coord
, tmp
, index
, "");
2164 * Apparantly cube has issue with integer types that the workaround doesn't solve,
2165 * so this tests if the format is 8_8_8_8 and an integer type do an alternate
2166 * workaround by sampling using a scaled type and converting.
2167 * This is taken from amdgpu-pro shaders.
2169 /* NOTE this produces some ugly code compared to amdgpu-pro,
2170 * LLVM ends up dumping SGPRs into VGPRs to deal with the compare/select,
2171 * and then reads them back. -pro generates two selects,
2172 * one s_cmp for the descriptor rewriting
2173 * one v_cmp for the coordinate and result changes.
2175 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
2176 LLVMValueRef tmp
, tmp2
;
2178 /* workaround 8/8/8/8 uint/sint cube gather bug */
2179 /* first detect it then change to a scaled read and f2i */
2180 tmp
= LLVMBuildExtractElement(ctx
->builder
, args
->resource
, ctx
->i32_1
, "");
2183 /* extract the DATA_FORMAT */
2184 tmp
= ac_build_bfe(ctx
, tmp
, LLVMConstInt(ctx
->i32
, 20, false),
2185 LLVMConstInt(ctx
->i32
, 6, false), false);
2187 /* is the DATA_FORMAT == 8_8_8_8 */
2188 compare_cube_wa
= LLVMBuildICmp(ctx
->builder
, LLVMIntEQ
, tmp
, LLVMConstInt(ctx
->i32
, V_008F14_IMG_DATA_FORMAT_8_8_8_8
, false), "");
2190 if (stype
== GLSL_TYPE_UINT
)
2191 /* Create a NUM FORMAT - 0x2 or 0x4 - USCALED or UINT */
2192 tmp
= LLVMBuildSelect(ctx
->builder
, compare_cube_wa
, LLVMConstInt(ctx
->i32
, 0x8000000, false),
2193 LLVMConstInt(ctx
->i32
, 0x10000000, false), "");
2195 /* Create a NUM FORMAT - 0x3 or 0x5 - SSCALED or SINT */
2196 tmp
= LLVMBuildSelect(ctx
->builder
, compare_cube_wa
, LLVMConstInt(ctx
->i32
, 0xc000000, false),
2197 LLVMConstInt(ctx
->i32
, 0x14000000, false), "");
2199 /* replace the NUM FORMAT in the descriptor */
2200 tmp2
= LLVMBuildAnd(ctx
->builder
, tmp2
, LLVMConstInt(ctx
->i32
, C_008F14_NUM_FORMAT_GFX6
, false), "");
2201 tmp2
= LLVMBuildOr(ctx
->builder
, tmp2
, tmp
, "");
2203 args
->resource
= LLVMBuildInsertElement(ctx
->builder
, args
->resource
, tmp2
, ctx
->i32_1
, "");
2205 /* don't modify the coordinates for this case */
2206 coord
= LLVMBuildSelect(ctx
->builder
, compare_cube_wa
, orig_coords
, coord
, "");
2209 result
= ac_build_image_opcode(ctx
, args
);
2211 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
2212 LLVMValueRef tmp
, tmp2
;
2214 /* if the cube workaround is in place, f2i the result. */
2215 for (c
= 0; c
< 4; c
++) {
2216 tmp
= LLVMBuildExtractElement(ctx
->builder
, result
, LLVMConstInt(ctx
->i32
, c
, false), "");
2217 if (stype
== GLSL_TYPE_UINT
)
2218 tmp2
= LLVMBuildFPToUI(ctx
->builder
, tmp
, ctx
->i32
, "");
2220 tmp2
= LLVMBuildFPToSI(ctx
->builder
, tmp
, ctx
->i32
, "");
2221 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->i32
, "");
2222 tmp2
= LLVMBuildBitCast(ctx
->builder
, tmp2
, ctx
->i32
, "");
2223 tmp
= LLVMBuildSelect(ctx
->builder
, compare_cube_wa
, tmp2
, tmp
, "");
2224 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->f32
, "");
2225 result
= LLVMBuildInsertElement(ctx
->builder
, result
, tmp
, LLVMConstInt(ctx
->i32
, c
, false), "");
2231 static LLVMValueRef
build_tex_intrinsic(struct ac_nir_context
*ctx
,
2232 const nir_tex_instr
*instr
,
2234 struct ac_image_args
*args
)
2236 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
2237 unsigned mask
= nir_ssa_def_components_read(&instr
->dest
.ssa
);
2239 return ac_build_buffer_load_format(&ctx
->ac
,
2243 util_last_bit(mask
),
2247 args
->opcode
= ac_image_sample
;
2248 args
->compare
= instr
->is_shadow
;
2250 switch (instr
->op
) {
2252 case nir_texop_txf_ms
:
2253 case nir_texop_samples_identical
:
2254 args
->opcode
= lod_is_zero
||
2255 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
?
2256 ac_image_load
: ac_image_load_mip
;
2257 args
->compare
= false;
2258 args
->offset
= false;
2265 args
->level_zero
= true;
2270 case nir_texop_query_levels
:
2271 args
->opcode
= ac_image_get_resinfo
;
2274 if (ctx
->stage
!= MESA_SHADER_FRAGMENT
)
2275 args
->level_zero
= true;
2281 args
->opcode
= ac_image_gather4
;
2282 args
->level_zero
= true;
2285 args
->opcode
= ac_image_get_lod
;
2286 args
->compare
= false;
2287 args
->offset
= false;
2293 if (instr
->op
== nir_texop_tg4
&& ctx
->ac
.chip_class
<= VI
) {
2294 enum glsl_base_type stype
= glsl_get_sampler_result_type(instr
->texture
->var
->type
);
2295 if (stype
== GLSL_TYPE_UINT
|| stype
== GLSL_TYPE_INT
) {
2296 return radv_lower_gather4_integer(&ctx
->ac
, args
, instr
);
2299 return ac_build_image_opcode(&ctx
->ac
, args
);
2303 radv_load_resource(struct ac_shader_abi
*abi
, LLVMValueRef index
,
2304 unsigned desc_set
, unsigned binding
)
2306 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
2307 LLVMValueRef desc_ptr
= ctx
->descriptor_sets
[desc_set
];
2308 struct radv_pipeline_layout
*pipeline_layout
= ctx
->options
->layout
;
2309 struct radv_descriptor_set_layout
*layout
= pipeline_layout
->set
[desc_set
].layout
;
2310 unsigned base_offset
= layout
->binding
[binding
].offset
;
2311 LLVMValueRef offset
, stride
;
2313 if (layout
->binding
[binding
].type
== VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
||
2314 layout
->binding
[binding
].type
== VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
) {
2315 unsigned idx
= pipeline_layout
->set
[desc_set
].dynamic_offset_start
+
2316 layout
->binding
[binding
].dynamic_offset_offset
;
2317 desc_ptr
= ctx
->abi
.push_constants
;
2318 base_offset
= pipeline_layout
->push_constant_size
+ 16 * idx
;
2319 stride
= LLVMConstInt(ctx
->ac
.i32
, 16, false);
2321 stride
= LLVMConstInt(ctx
->ac
.i32
, layout
->binding
[binding
].size
, false);
2323 offset
= LLVMConstInt(ctx
->ac
.i32
, base_offset
, false);
2324 index
= LLVMBuildMul(ctx
->ac
.builder
, index
, stride
, "");
2325 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
, index
, "");
2327 desc_ptr
= ac_build_gep0(&ctx
->ac
, desc_ptr
, offset
);
2328 desc_ptr
= ac_cast_ptr(&ctx
->ac
, desc_ptr
, ctx
->ac
.v4i32
);
2329 LLVMSetMetadata(desc_ptr
, ctx
->ac
.uniform_md_kind
, ctx
->ac
.empty_md
);
static LLVMValueRef
visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
			      nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr = get_src(ctx, instr->src[0]);
	LLVMValueRef index = get_src(ctx, instr->src[1]);

	LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
	LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
	return result;
}
static LLVMValueRef
visit_load_push_constant(struct ac_nir_context *ctx,
			 nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr, addr;

	addr = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
	addr = LLVMBuildAdd(ctx->ac.builder, addr,
			    get_src(ctx, instr->src[0]), "");

	ptr = ac_build_gep0(&ctx->ac, ctx->abi->push_constants, addr);
	ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

	return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef
visit_get_buffer_size(struct ac_nir_context *ctx,
		      const nir_intrinsic_instr *instr)
{
	LLVMValueRef index = get_src(ctx, instr->src[0]);

	return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}

static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
	uint32_t new_mask = 0;
	for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
		if (mask & (1u << i))
			new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
	return new_mask;
}
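/* Editor's worked example (not from the original source): widen_mask(0b0101, 2)
 * expands each set bit into a run of two bits at twice the position, giving
 * 0b00110011. This is how a per-component writemask for 64-bit values is
 * turned into a mask over the 32-bit dwords that actually get stored.
 */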
static LLVMValueRef
extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
		     unsigned start, unsigned count)
{
	LLVMTypeRef type = LLVMTypeOf(src);

	if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
		return src;
	}

	unsigned src_elements = LLVMGetVectorSize(type);
	assert(start < src_elements);
	assert(start + count <= src_elements);

	if (start == 0 && count == src_elements)
		return src;

	if (count == 1)
		return LLVMBuildExtractElement(ctx->builder, src, LLVMConstInt(ctx->i32, start, false), "");

	LLVMValueRef indices[8];
	for (unsigned i = 0; i < count; ++i)
		indices[i] = LLVMConstInt(ctx->i32, start + i, false);

	LLVMValueRef swizzle = LLVMConstVector(indices, count);
	return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
}
2407 static void visit_store_ssbo(struct ac_nir_context
*ctx
,
2408 nir_intrinsic_instr
*instr
)
2410 const char *store_name
;
2411 LLVMValueRef src_data
= get_src(ctx
, instr
->src
[0]);
2412 LLVMTypeRef data_type
= ctx
->ac
.f32
;
2413 int elem_size_mult
= ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src_data
)) / 32;
2414 int components_32bit
= elem_size_mult
* instr
->num_components
;
2415 unsigned writemask
= nir_intrinsic_write_mask(instr
);
2416 LLVMValueRef base_data
, base_offset
;
2417 LLVMValueRef params
[6];
2419 params
[1] = ctx
->abi
->load_ssbo(ctx
->abi
,
2420 get_src(ctx
, instr
->src
[1]), true);
2421 params
[2] = ctx
->ac
.i32_0
; /* vindex */
2422 params
[4] = ctx
->ac
.i1false
; /* glc */
2423 params
[5] = ctx
->ac
.i1false
; /* slc */
2425 if (components_32bit
> 1)
2426 data_type
= LLVMVectorType(ctx
->ac
.f32
, components_32bit
);
2428 writemask
= widen_mask(writemask
, elem_size_mult
);
2430 base_data
= ac_to_float(&ctx
->ac
, src_data
);
2431 base_data
= ac_trim_vector(&ctx
->ac
, base_data
, instr
->num_components
);
2432 base_data
= LLVMBuildBitCast(ctx
->ac
.builder
, base_data
,
2434 base_offset
= get_src(ctx
, instr
->src
[2]); /* voffset */
2438 LLVMValueRef offset
;
2440 u_bit_scan_consecutive_range(&writemask
, &start
, &count
);
2442 /* Due to an LLVM limitation, split 3-element writes
2443 * into a 2-element and a 1-element write. */
2445 writemask
|= 1 << (start
+ 2);
2450 writemask
|= ((1u << (count
- 4)) - 1u) << (start
+ 4);
2455 store_name
= "llvm.amdgcn.buffer.store.v4f32";
2456 } else if (count
== 2) {
2457 store_name
= "llvm.amdgcn.buffer.store.v2f32";
2461 store_name
= "llvm.amdgcn.buffer.store.f32";
2463 data
= extract_vector_range(&ctx
->ac
, base_data
, start
, count
);
2465 offset
= base_offset
;
2467 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
, LLVMConstInt(ctx
->ac
.i32
, start
* 4, false), "");
2471 ac_build_intrinsic(&ctx
->ac
, store_name
,
2472 ctx
->ac
.voidt
, params
, 6, 0);
2476 static LLVMValueRef
visit_atomic_ssbo(struct ac_nir_context
*ctx
,
2477 const nir_intrinsic_instr
*instr
)
2480 LLVMValueRef params
[6];
2483 if (instr
->intrinsic
== nir_intrinsic_ssbo_atomic_comp_swap
) {
2484 params
[arg_count
++] = ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[3]), 0);
2486 params
[arg_count
++] = ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[2]), 0);
2487 params
[arg_count
++] = ctx
->abi
->load_ssbo(ctx
->abi
,
2488 get_src(ctx
, instr
->src
[0]),
2490 params
[arg_count
++] = ctx
->ac
.i32_0
; /* vindex */
2491 params
[arg_count
++] = get_src(ctx
, instr
->src
[1]); /* voffset */
2492 params
[arg_count
++] = LLVMConstInt(ctx
->ac
.i1
, 0, false); /* slc */
2494 switch (instr
->intrinsic
) {
2495 case nir_intrinsic_ssbo_atomic_add
:
2496 name
= "llvm.amdgcn.buffer.atomic.add";
2498 case nir_intrinsic_ssbo_atomic_imin
:
2499 name
= "llvm.amdgcn.buffer.atomic.smin";
2501 case nir_intrinsic_ssbo_atomic_umin
:
2502 name
= "llvm.amdgcn.buffer.atomic.umin";
2504 case nir_intrinsic_ssbo_atomic_imax
:
2505 name
= "llvm.amdgcn.buffer.atomic.smax";
2507 case nir_intrinsic_ssbo_atomic_umax
:
2508 name
= "llvm.amdgcn.buffer.atomic.umax";
2510 case nir_intrinsic_ssbo_atomic_and
:
2511 name
= "llvm.amdgcn.buffer.atomic.and";
2513 case nir_intrinsic_ssbo_atomic_or
:
2514 name
= "llvm.amdgcn.buffer.atomic.or";
2516 case nir_intrinsic_ssbo_atomic_xor
:
2517 name
= "llvm.amdgcn.buffer.atomic.xor";
2519 case nir_intrinsic_ssbo_atomic_exchange
:
2520 name
= "llvm.amdgcn.buffer.atomic.swap";
2522 case nir_intrinsic_ssbo_atomic_comp_swap
:
2523 name
= "llvm.amdgcn.buffer.atomic.cmpswap";
2529 return ac_build_intrinsic(&ctx
->ac
, name
, ctx
->ac
.i32
, params
, arg_count
, 0);
2532 static LLVMValueRef
visit_load_buffer(struct ac_nir_context
*ctx
,
2533 const nir_intrinsic_instr
*instr
)
2535 LLVMValueRef results
[2];
2536 int load_components
;
2537 int num_components
= instr
->num_components
;
2538 if (instr
->dest
.ssa
.bit_size
== 64)
2539 num_components
*= 2;
2541 for (int i
= 0; i
< num_components
; i
+= load_components
) {
2542 load_components
= MIN2(num_components
- i
, 4);
2543 const char *load_name
;
2544 LLVMTypeRef data_type
= ctx
->ac
.f32
;
2545 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, i
* 4, false);
2546 offset
= LLVMBuildAdd(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[1]), offset
, "");
2548 if (load_components
== 3)
2549 data_type
= LLVMVectorType(ctx
->ac
.f32
, 4);
2550 else if (load_components
> 1)
2551 data_type
= LLVMVectorType(ctx
->ac
.f32
, load_components
);
2553 if (load_components
>= 3)
2554 load_name
= "llvm.amdgcn.buffer.load.v4f32";
2555 else if (load_components
== 2)
2556 load_name
= "llvm.amdgcn.buffer.load.v2f32";
2557 else if (load_components
== 1)
2558 load_name
= "llvm.amdgcn.buffer.load.f32";
2560 unreachable("unhandled number of components");
2562 LLVMValueRef params
[] = {
2563 ctx
->abi
->load_ssbo(ctx
->abi
,
2564 get_src(ctx
, instr
->src
[0]),
2572 results
[i
> 0 ? 1 : 0] = ac_build_intrinsic(&ctx
->ac
, load_name
, data_type
, params
, 5, 0);
2576 LLVMValueRef ret
= results
[0];
2577 if (num_components
> 4 || num_components
== 3) {
2578 LLVMValueRef masks
[] = {
2579 LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
2580 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
2581 LLVMConstInt(ctx
->ac
.i32
, 4, false), LLVMConstInt(ctx
->ac
.i32
, 5, false),
2582 LLVMConstInt(ctx
->ac
.i32
, 6, false), LLVMConstInt(ctx
->ac
.i32
, 7, false)
2585 LLVMValueRef swizzle
= LLVMConstVector(masks
, num_components
);
2586 ret
= LLVMBuildShuffleVector(ctx
->ac
.builder
, results
[0],
2587 results
[num_components
> 4 ? 1 : 0], swizzle
, "");
2590 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
,
2591 get_def_type(ctx
, &instr
->dest
.ssa
), "");
2594 static LLVMValueRef
visit_load_ubo_buffer(struct ac_nir_context
*ctx
,
2595 const nir_intrinsic_instr
*instr
)
2598 LLVMValueRef rsrc
= get_src(ctx
, instr
->src
[0]);
2599 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
2600 int num_components
= instr
->num_components
;
2602 if (ctx
->abi
->load_ubo
)
2603 rsrc
= ctx
->abi
->load_ubo(ctx
->abi
, rsrc
);
2605 if (instr
->dest
.ssa
.bit_size
== 64)
2606 num_components
*= 2;
2608 ret
= ac_build_buffer_load(&ctx
->ac
, rsrc
, num_components
, NULL
, offset
,
2609 NULL
, 0, false, false, true, true);
2610 ret
= ac_trim_vector(&ctx
->ac
, ret
, num_components
);
2611 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
,
2612 get_def_type(ctx
, &instr
->dest
.ssa
), "");
2616 get_deref_offset(struct ac_nir_context
*ctx
, nir_deref_var
*deref
,
2617 bool vs_in
, unsigned *vertex_index_out
,
2618 LLVMValueRef
*vertex_index_ref
,
2619 unsigned *const_out
, LLVMValueRef
*indir_out
)
2621 unsigned const_offset
= 0;
2622 nir_deref
*tail
= &deref
->deref
;
2623 LLVMValueRef offset
= NULL
;
2625 if (vertex_index_out
!= NULL
|| vertex_index_ref
!= NULL
) {
2627 nir_deref_array
*deref_array
= nir_deref_as_array(tail
);
2628 if (vertex_index_out
)
2629 *vertex_index_out
= deref_array
->base_offset
;
2631 if (vertex_index_ref
) {
2632 LLVMValueRef vtx
= LLVMConstInt(ctx
->ac
.i32
, deref_array
->base_offset
, false);
2633 if (deref_array
->deref_array_type
== nir_deref_array_type_indirect
) {
2634 vtx
= LLVMBuildAdd(ctx
->ac
.builder
, vtx
, get_src(ctx
, deref_array
->indirect
), "");
2636 *vertex_index_ref
= vtx
;
2640 if (deref
->var
->data
.compact
) {
2641 assert(tail
->child
->deref_type
== nir_deref_type_array
);
2642 assert(glsl_type_is_scalar(glsl_without_array(deref
->var
->type
)));
2643 nir_deref_array
*deref_array
= nir_deref_as_array(tail
->child
);
2644 /* We always lower indirect dereferences for "compact" array vars. */
2645 assert(deref_array
->deref_array_type
== nir_deref_array_type_direct
);
2647 const_offset
= deref_array
->base_offset
;
2651 while (tail
->child
!= NULL
) {
2652 const struct glsl_type
*parent_type
= tail
->type
;
2655 if (tail
->deref_type
== nir_deref_type_array
) {
2656 nir_deref_array
*deref_array
= nir_deref_as_array(tail
);
2657 LLVMValueRef index
, stride
, local_offset
;
2658 unsigned size
= glsl_count_attribute_slots(tail
->type
, vs_in
);
2660 const_offset
+= size
* deref_array
->base_offset
;
2661 if (deref_array
->deref_array_type
== nir_deref_array_type_direct
)
2664 assert(deref_array
->deref_array_type
== nir_deref_array_type_indirect
);
2665 index
= get_src(ctx
, deref_array
->indirect
);
2666 stride
= LLVMConstInt(ctx
->ac
.i32
, size
, 0);
2667 local_offset
= LLVMBuildMul(ctx
->ac
.builder
, stride
, index
, "");
2670 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
, local_offset
, "");
2672 offset
= local_offset
;
2673 } else if (tail
->deref_type
== nir_deref_type_struct
) {
2674 nir_deref_struct
*deref_struct
= nir_deref_as_struct(tail
);
2676 for (unsigned i
= 0; i
< deref_struct
->index
; i
++) {
2677 const struct glsl_type
*ft
= glsl_get_struct_field(parent_type
, i
);
2678 const_offset
+= glsl_count_attribute_slots(ft
, vs_in
);
2681 unreachable("unsupported deref type");
2685 if (const_offset
&& offset
)
2686 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
,
2687 LLVMConstInt(ctx
->ac
.i32
, const_offset
, 0),
2690 *const_out
= const_offset
;
2691 *indir_out
= offset
;
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
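/* Editor's sketch of the resulting addressing, derived from
 * get_tcs_tes_buffer_address() below (not a comment from the original source):
 * for per-vertex data, the byte offset of attribute A, patch P, vertex V is
 *
 *   ((P * vertices_per_patch + V) + A * vertices_per_patch * num_patches) * 16
 *
 * i.e. 16 bytes (4 components) per attribute slot. Per-patch data instead uses
 * rel_patch_id with num_patches as the stride, plus the patch-data offset
 * unpacked from tcs_offchip_layout.
 */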
2713 static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context
*ctx
,
2714 LLVMValueRef vertex_index
,
2715 LLVMValueRef param_index
)
2717 LLVMValueRef base_addr
, vertices_per_patch
, num_patches
;
2718 LLVMValueRef param_stride
, constant16
;
2719 LLVMValueRef rel_patch_id
= get_rel_patch_id(ctx
);
2721 vertices_per_patch
= LLVMConstInt(ctx
->ac
.i32
, ctx
->tcs_vertices_per_patch
, false);
2722 num_patches
= unpack_param(&ctx
->ac
, ctx
->tcs_offchip_layout
, 0, 9);
2724 constant16
= LLVMConstInt(ctx
->ac
.i32
, 16, false);
2726 base_addr
= LLVMBuildMul(ctx
->ac
.builder
, rel_patch_id
,
2727 vertices_per_patch
, "");
2729 base_addr
= LLVMBuildAdd(ctx
->ac
.builder
, base_addr
,
2732 param_stride
= LLVMBuildMul(ctx
->ac
.builder
, vertices_per_patch
,
2735 base_addr
= rel_patch_id
;
2736 param_stride
= num_patches
;
2739 base_addr
= LLVMBuildAdd(ctx
->ac
.builder
, base_addr
,
2740 LLVMBuildMul(ctx
->ac
.builder
, param_index
,
2741 param_stride
, ""), "");
2743 base_addr
= LLVMBuildMul(ctx
->ac
.builder
, base_addr
, constant16
, "");
2745 if (!vertex_index
) {
2746 LLVMValueRef patch_data_offset
=
2747 unpack_param(&ctx
->ac
, ctx
->tcs_offchip_layout
, 16, 16);
2749 base_addr
= LLVMBuildAdd(ctx
->ac
.builder
, base_addr
,
2750 patch_data_offset
, "");
2755 static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context
*ctx
,
2757 unsigned const_index
,
2759 LLVMValueRef vertex_index
,
2760 LLVMValueRef indir_index
)
2762 LLVMValueRef param_index
;
2765 param_index
= LLVMBuildAdd(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, param
, false),
2768 if (const_index
&& !is_compact
)
2769 param
+= const_index
;
2770 param_index
= LLVMConstInt(ctx
->ac
.i32
, param
, false);
2772 return get_tcs_tes_buffer_address(ctx
, vertex_index
, param_index
);
static void
mark_tess_output(struct radv_shader_context *ctx,
		 bool is_patch, uint32_t param)
{
	if (is_patch)
		ctx->tess_patch_outputs_written |= (1ull << param);
	else
		ctx->tess_outputs_written |= (1ull << param);
}
2787 get_dw_address(struct radv_shader_context
*ctx
,
2788 LLVMValueRef dw_addr
,
2790 unsigned const_index
,
2791 bool compact_const_index
,
2792 LLVMValueRef vertex_index
,
2793 LLVMValueRef stride
,
2794 LLVMValueRef indir_index
)
2799 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2800 LLVMBuildMul(ctx
->ac
.builder
,
2806 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2807 LLVMBuildMul(ctx
->ac
.builder
, indir_index
,
2808 LLVMConstInt(ctx
->ac
.i32
, 4, false), ""), "");
2809 else if (const_index
&& !compact_const_index
)
2810 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2811 LLVMConstInt(ctx
->ac
.i32
, const_index
, false), "");
2813 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2814 LLVMConstInt(ctx
->ac
.i32
, param
* 4, false), "");
2816 if (const_index
&& compact_const_index
)
2817 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2818 LLVMConstInt(ctx
->ac
.i32
, const_index
, false), "");
2823 load_tcs_varyings(struct ac_shader_abi
*abi
,
2825 LLVMValueRef vertex_index
,
2826 LLVMValueRef indir_index
,
2827 unsigned const_index
,
2829 unsigned driver_location
,
2831 unsigned num_components
,
2836 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
2837 LLVMValueRef dw_addr
, stride
;
2838 LLVMValueRef value
[4], result
;
2839 unsigned param
= shader_io_get_unique_index(location
);
2842 stride
= unpack_param(&ctx
->ac
, ctx
->tcs_in_layout
, 13, 8);
2843 dw_addr
= get_tcs_in_current_patch_offset(ctx
);
2846 stride
= get_tcs_out_vertex_stride(ctx
);
2847 dw_addr
= get_tcs_out_current_patch_offset(ctx
);
2849 dw_addr
= get_tcs_out_current_patch_data_offset(ctx
);
2854 dw_addr
= get_dw_address(ctx
, dw_addr
, param
, const_index
, is_compact
, vertex_index
, stride
,
2857 for (unsigned i
= 0; i
< num_components
+ component
; i
++) {
2858 value
[i
] = ac_lds_load(&ctx
->ac
, dw_addr
);
2859 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2862 result
= ac_build_varying_gather_values(&ctx
->ac
, value
, num_components
, component
);
2867 store_tcs_output(struct ac_shader_abi
*abi
,
2868 LLVMValueRef vertex_index
,
2869 LLVMValueRef param_index
,
2870 unsigned const_index
,
2872 unsigned driver_location
,
2879 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
2880 LLVMValueRef dw_addr
;
2881 LLVMValueRef stride
= NULL
;
2882 LLVMValueRef buf_addr
= NULL
;
2884 bool store_lds
= true;
2887 if (!(ctx
->tcs_patch_outputs_read
& (1U << (location
- VARYING_SLOT_PATCH0
))))
2890 if (!(ctx
->tcs_outputs_read
& (1ULL << location
)))
2894 param
= shader_io_get_unique_index(location
);
2895 if (location
== VARYING_SLOT_CLIP_DIST0
&&
2896 is_compact
&& const_index
> 3) {
2902 stride
= get_tcs_out_vertex_stride(ctx
);
2903 dw_addr
= get_tcs_out_current_patch_offset(ctx
);
2905 dw_addr
= get_tcs_out_current_patch_data_offset(ctx
);
2908 mark_tess_output(ctx
, is_patch
, param
);
2910 dw_addr
= get_dw_address(ctx
, dw_addr
, param
, const_index
, is_compact
, vertex_index
, stride
,
2912 buf_addr
= get_tcs_tes_buffer_address_params(ctx
, param
, const_index
, is_compact
,
2913 vertex_index
, param_index
);
2915 bool is_tess_factor
= false;
2916 if (location
== VARYING_SLOT_TESS_LEVEL_INNER
||
2917 location
== VARYING_SLOT_TESS_LEVEL_OUTER
)
2918 is_tess_factor
= true;
2920 unsigned base
= is_compact
? const_index
: 0;
2921 for (unsigned chan
= 0; chan
< 8; chan
++) {
2922 if (!(writemask
& (1 << chan
)))
2924 LLVMValueRef value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
- component
);
2926 if (store_lds
|| is_tess_factor
) {
2927 LLVMValueRef dw_addr_chan
=
2928 LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2929 LLVMConstInt(ctx
->ac
.i32
, chan
, false), "");
2930 ac_lds_store(&ctx
->ac
, dw_addr_chan
, value
);
2933 if (!is_tess_factor
&& writemask
!= 0xF)
2934 ac_build_buffer_store_dword(&ctx
->ac
, ctx
->hs_ring_tess_offchip
, value
, 1,
2935 buf_addr
, ctx
->oc_lds
,
2936 4 * (base
+ chan
), 1, 0, true, false);
2939 if (writemask
== 0xF) {
2940 ac_build_buffer_store_dword(&ctx
->ac
, ctx
->hs_ring_tess_offchip
, src
, 4,
2941 buf_addr
, ctx
->oc_lds
,
2942 (base
* 4), 1, 0, true, false);
2947 load_tes_input(struct ac_shader_abi
*abi
,
2949 LLVMValueRef vertex_index
,
2950 LLVMValueRef param_index
,
2951 unsigned const_index
,
2953 unsigned driver_location
,
2955 unsigned num_components
,
2960 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
2961 LLVMValueRef buf_addr
;
2962 LLVMValueRef result
;
2963 unsigned param
= shader_io_get_unique_index(location
);
2965 if (location
== VARYING_SLOT_CLIP_DIST0
&& is_compact
&& const_index
> 3) {
2970 buf_addr
= get_tcs_tes_buffer_address_params(ctx
, param
, const_index
,
2971 is_compact
, vertex_index
, param_index
);
2973 LLVMValueRef comp_offset
= LLVMConstInt(ctx
->ac
.i32
, component
* 4, false);
2974 buf_addr
= LLVMBuildAdd(ctx
->ac
.builder
, buf_addr
, comp_offset
, "");
2976 result
= ac_build_buffer_load(&ctx
->ac
, ctx
->hs_ring_tess_offchip
, num_components
, NULL
,
2977 buf_addr
, ctx
->oc_lds
, is_compact
? (4 * const_index
) : 0, 1, 0, true, false);
2978 result
= ac_trim_vector(&ctx
->ac
, result
, num_components
);
2983 load_gs_input(struct ac_shader_abi
*abi
,
2985 unsigned driver_location
,
2987 unsigned num_components
,
2988 unsigned vertex_index
,
2989 unsigned const_index
,
2992 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
2993 LLVMValueRef vtx_offset
;
2994 unsigned param
, vtx_offset_param
;
2995 LLVMValueRef value
[4], result
;
2997 vtx_offset_param
= vertex_index
;
2998 assert(vtx_offset_param
< 6);
2999 vtx_offset
= LLVMBuildMul(ctx
->ac
.builder
, ctx
->gs_vtx_offset
[vtx_offset_param
],
3000 LLVMConstInt(ctx
->ac
.i32
, 4, false), "");
3002 param
= shader_io_get_unique_index(location
);
3004 for (unsigned i
= component
; i
< num_components
+ component
; i
++) {
3005 if (ctx
->ac
.chip_class
>= GFX9
) {
3006 LLVMValueRef dw_addr
= ctx
->gs_vtx_offset
[vtx_offset_param
];
3007 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
3008 LLVMConstInt(ctx
->ac
.i32
, param
* 4 + i
+ const_index
, 0), "");
3009 value
[i
] = ac_lds_load(&ctx
->ac
, dw_addr
);
3011 LLVMValueRef soffset
=
3012 LLVMConstInt(ctx
->ac
.i32
,
3013 (param
* 4 + i
+ const_index
) * 256,
3016 value
[i
] = ac_build_buffer_load(&ctx
->ac
,
3019 vtx_offset
, soffset
,
3020 0, 1, 0, true, false);
3022 value
[i
] = LLVMBuildBitCast(ctx
->ac
.builder
, value
[i
],
3026 result
= ac_build_varying_gather_values(&ctx
->ac
, value
, num_components
, component
);
3027 result
= ac_to_integer(&ctx
->ac
, result
);
3032 build_gep_for_deref(struct ac_nir_context
*ctx
,
3033 nir_deref_var
*deref
)
3035 struct hash_entry
*entry
= _mesa_hash_table_search(ctx
->vars
, deref
->var
);
3036 assert(entry
->data
);
3037 LLVMValueRef val
= entry
->data
;
3038 nir_deref
*tail
= deref
->deref
.child
;
3039 while (tail
!= NULL
) {
3040 LLVMValueRef offset
;
3041 switch (tail
->deref_type
) {
3042 case nir_deref_type_array
: {
3043 nir_deref_array
*array
= nir_deref_as_array(tail
);
3044 offset
= LLVMConstInt(ctx
->ac
.i32
, array
->base_offset
, 0);
3045 if (array
->deref_array_type
==
3046 nir_deref_array_type_indirect
) {
3047 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
,
3054 case nir_deref_type_struct
: {
3055 nir_deref_struct
*deref_struct
=
3056 nir_deref_as_struct(tail
);
3057 offset
= LLVMConstInt(ctx
->ac
.i32
,
3058 deref_struct
->index
, 0);
3062 unreachable("bad deref type");
3064 val
= ac_build_gep0(&ctx
->ac
, val
, offset
);
3070 static LLVMValueRef
load_tess_varyings(struct ac_nir_context
*ctx
,
3071 nir_intrinsic_instr
*instr
,
3074 LLVMValueRef result
;
3075 LLVMValueRef vertex_index
= NULL
;
3076 LLVMValueRef indir_index
= NULL
;
3077 unsigned const_index
= 0;
3078 unsigned location
= instr
->variables
[0]->var
->data
.location
;
3079 unsigned driver_location
= instr
->variables
[0]->var
->data
.driver_location
;
3080 const bool is_patch
= instr
->variables
[0]->var
->data
.patch
;
3081 const bool is_compact
= instr
->variables
[0]->var
->data
.compact
;
3083 get_deref_offset(ctx
, instr
->variables
[0],
3084 false, NULL
, is_patch
? NULL
: &vertex_index
,
3085 &const_index
, &indir_index
);
3087 LLVMTypeRef dest_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
3089 LLVMTypeRef src_component_type
;
3090 if (LLVMGetTypeKind(dest_type
) == LLVMVectorTypeKind
)
3091 src_component_type
= LLVMGetElementType(dest_type
);
3093 src_component_type
= dest_type
;
3095 result
= ctx
->abi
->load_tess_varyings(ctx
->abi
, src_component_type
,
3096 vertex_index
, indir_index
,
3097 const_index
, location
, driver_location
,
3098 instr
->variables
[0]->var
->data
.location_frac
,
3099 instr
->num_components
,
3100 is_patch
, is_compact
, load_inputs
);
3101 return LLVMBuildBitCast(ctx
->ac
.builder
, result
, dest_type
, "");
3104 static LLVMValueRef
visit_load_var(struct ac_nir_context
*ctx
,
3105 nir_intrinsic_instr
*instr
)
3107 LLVMValueRef values
[8];
3108 int idx
= instr
->variables
[0]->var
->data
.driver_location
;
3109 int ve
= instr
->dest
.ssa
.num_components
;
3110 unsigned comp
= instr
->variables
[0]->var
->data
.location_frac
;
3111 LLVMValueRef indir_index
;
3113 unsigned const_index
;
3114 unsigned stride
= instr
->variables
[0]->var
->data
.compact
? 1 : 4;
3115 bool vs_in
= ctx
->stage
== MESA_SHADER_VERTEX
&&
3116 instr
->variables
[0]->var
->data
.mode
== nir_var_shader_in
;
3117 get_deref_offset(ctx
, instr
->variables
[0], vs_in
, NULL
, NULL
,
3118 &const_index
, &indir_index
);
3120 if (instr
->dest
.ssa
.bit_size
== 64)
3123 switch (instr
->variables
[0]->var
->data
.mode
) {
3124 case nir_var_shader_in
:
3125 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
||
3126 ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
3127 return load_tess_varyings(ctx
, instr
, true);
3130 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
3131 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
3132 LLVMValueRef indir_index
;
3133 unsigned const_index
, vertex_index
;
3134 get_deref_offset(ctx
, instr
->variables
[0],
3135 false, &vertex_index
, NULL
,
3136 &const_index
, &indir_index
);
3138 return ctx
->abi
->load_inputs(ctx
->abi
, instr
->variables
[0]->var
->data
.location
,
3139 instr
->variables
[0]->var
->data
.driver_location
,
3140 instr
->variables
[0]->var
->data
.location_frac
,
3141 instr
->num_components
, vertex_index
, const_index
, type
);
3144 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
3146 unsigned count
= glsl_count_attribute_slots(
3147 instr
->variables
[0]->var
->type
,
3148 ctx
->stage
== MESA_SHADER_VERTEX
);
3150 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
3151 &ctx
->ac
, ctx
->abi
->inputs
+ idx
+ chan
, count
,
3152 stride
, false, true);
3154 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
3158 values
[chan
] = ctx
->abi
->inputs
[idx
+ chan
+ const_index
* stride
];
3162 for (unsigned chan
= 0; chan
< ve
; chan
++) {
3164 unsigned count
= glsl_count_attribute_slots(
3165 instr
->variables
[0]->var
->type
, false);
3167 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
3168 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
3169 stride
, true, true);
3171 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
3175 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
, ctx
->locals
[idx
+ chan
+ const_index
* stride
], "");
3179 case nir_var_shared
: {
3180 LLVMValueRef address
= build_gep_for_deref(ctx
,
3181 instr
->variables
[0]);
3182 LLVMValueRef val
= LLVMBuildLoad(ctx
->ac
.builder
, address
, "");
3183 return LLVMBuildBitCast(ctx
->ac
.builder
, val
,
3184 get_def_type(ctx
, &instr
->dest
.ssa
),
3187 case nir_var_shader_out
:
3188 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3189 return load_tess_varyings(ctx
, instr
, false);
3192 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
3194 unsigned count
= glsl_count_attribute_slots(
3195 instr
->variables
[0]->var
->type
, false);
3197 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
3198 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
3199 stride
, true, true);
3201 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
3205 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
,
3206 ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
],
3212 unreachable("unhandle variable mode");
3214 ret
= ac_build_varying_gather_values(&ctx
->ac
, values
, ve
, comp
);
3215 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
3219 visit_store_var(struct ac_nir_context
*ctx
,
3220 nir_intrinsic_instr
*instr
)
3222 LLVMValueRef temp_ptr
, value
;
3223 int idx
= instr
->variables
[0]->var
->data
.driver_location
;
3224 unsigned comp
= instr
->variables
[0]->var
->data
.location_frac
;
3225 LLVMValueRef src
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3226 int writemask
= instr
->const_index
[0] << comp
;
3227 LLVMValueRef indir_index
;
3228 unsigned const_index
;
3229 get_deref_offset(ctx
, instr
->variables
[0], false,
3230 NULL
, NULL
, &const_index
, &indir_index
);
3232 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
)) == 64) {
3234 src
= LLVMBuildBitCast(ctx
->ac
.builder
, src
,
3235 LLVMVectorType(ctx
->ac
.f32
, ac_get_llvm_num_components(src
) * 2),
3238 writemask
= widen_mask(writemask
, 2);
3241 switch (instr
->variables
[0]->var
->data
.mode
) {
3242 case nir_var_shader_out
:
3244 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3245 LLVMValueRef vertex_index
= NULL
;
3246 LLVMValueRef indir_index
= NULL
;
3247 unsigned const_index
= 0;
3248 const unsigned location
= instr
->variables
[0]->var
->data
.location
;
3249 const unsigned driver_location
= instr
->variables
[0]->var
->data
.driver_location
;
3250 const unsigned comp
= instr
->variables
[0]->var
->data
.location_frac
;
3251 const bool is_patch
= instr
->variables
[0]->var
->data
.patch
;
3252 const bool is_compact
= instr
->variables
[0]->var
->data
.compact
;
3254 get_deref_offset(ctx
, instr
->variables
[0],
3255 false, NULL
, is_patch
? NULL
: &vertex_index
,
3256 &const_index
, &indir_index
);
3258 ctx
->abi
->store_tcs_outputs(ctx
->abi
, vertex_index
, indir_index
,
3259 const_index
, location
, driver_location
,
3260 src
, comp
, is_patch
, is_compact
, writemask
);
3264 for (unsigned chan
= 0; chan
< 8; chan
++) {
3266 if (!(writemask
& (1 << chan
)))
3269 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
- comp
);
3271 if (instr
->variables
[0]->var
->data
.compact
)
3274 unsigned count
= glsl_count_attribute_slots(
3275 instr
->variables
[0]->var
->type
, false);
3277 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
3278 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
3279 stride
, true, true);
3281 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
3282 value
, indir_index
, "");
3283 build_store_values_extended(&ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
,
3284 count
, stride
, tmp_vec
);
3287 temp_ptr
= ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
];
3289 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
3294 for (unsigned chan
= 0; chan
< 8; chan
++) {
3295 if (!(writemask
& (1 << chan
)))
3298 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
);
3300 unsigned count
= glsl_count_attribute_slots(
3301 instr
->variables
[0]->var
->type
, false);
3303 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
3304 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
3307 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
3308 value
, indir_index
, "");
3309 build_store_values_extended(&ctx
->ac
, ctx
->locals
+ idx
+ chan
,
3312 temp_ptr
= ctx
->locals
[idx
+ chan
+ const_index
* 4];
3314 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
3318 case nir_var_shared
: {
3319 int writemask
= instr
->const_index
[0];
3320 LLVMValueRef address
= build_gep_for_deref(ctx
,
3321 instr
->variables
[0]);
3322 LLVMValueRef val
= get_src(ctx
, instr
->src
[0]);
3323 unsigned components
=
3324 glsl_get_vector_elements(
3325 nir_deref_tail(&instr
->variables
[0]->deref
)->type
);
3326 if (writemask
== (1 << components
) - 1) {
3327 val
= LLVMBuildBitCast(
3328 ctx
->ac
.builder
, val
,
3329 LLVMGetElementType(LLVMTypeOf(address
)), "");
3330 LLVMBuildStore(ctx
->ac
.builder
, val
, address
);
3332 for (unsigned chan
= 0; chan
< 4; chan
++) {
3333 if (!(writemask
& (1 << chan
)))
3336 LLVMBuildStructGEP(ctx
->ac
.builder
,
3338 LLVMValueRef src
= ac_llvm_extract_elem(&ctx
->ac
, val
,
3340 src
= LLVMBuildBitCast(
3341 ctx
->ac
.builder
, src
,
3342 LLVMGetElementType(LLVMTypeOf(ptr
)), "");
3343 LLVMBuildStore(ctx
->ac
.builder
, src
, ptr
);
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
	switch (dim) {
	case GLSL_SAMPLER_DIM_BUF:
		return 1;
	case GLSL_SAMPLER_DIM_1D:
		return array ? 2 : 1;
	case GLSL_SAMPLER_DIM_2D:
		return array ? 3 : 2;
	case GLSL_SAMPLER_DIM_MS:
		return array ? 4 : 3;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		return 3;
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_SUBPASS:
		return 2;
	case GLSL_SAMPLER_DIM_SUBPASS_MS:
		return 3;
	default:
		break;
	}
	return 0;
}

static bool
glsl_is_array_image(const struct glsl_type *type)
{
	const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);

	if (glsl_sampler_type_is_array(type))
		return true;

	return dim == GLSL_SAMPLER_DIM_CUBE ||
	       dim == GLSL_SAMPLER_DIM_3D ||
	       dim == GLSL_SAMPLER_DIM_SUBPASS ||
	       dim == GLSL_SAMPLER_DIM_SUBPASS_MS;
}
/* Adjust the sample index according to FMASK.
 *
 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
 * which is the identity mapping. Each nibble says which physical sample
 * should be fetched to get that sample.
 *
 * For example, 0x11111100 means there are only 2 samples stored and
 * the second sample covers 3/4 of the pixel. When reading samples 0
 * and 1, return physical sample 0 (determined by the first two 0s
 * in FMASK), otherwise return physical sample 1.
 *
 * The sample index should be adjusted as follows:
 *   sample_index = (fmask >> (sample_index * 4)) & 0xF;
 */
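/* Editor's worked example of the formula above (not from the original source):
 * with fmask = 0x11111100 and a requested sample_index of 3, nibble 3 is read:
 *   (0x11111100 >> (3 * 4)) & 0xF = 0x1
 * so physical sample 1 is fetched, matching the description above.
 */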
3407 static LLVMValueRef
adjust_sample_index_using_fmask(struct ac_llvm_context
*ctx
,
3408 LLVMValueRef coord_x
, LLVMValueRef coord_y
,
3409 LLVMValueRef coord_z
,
3410 LLVMValueRef sample_index
,
3411 LLVMValueRef fmask_desc_ptr
)
3413 LLVMValueRef fmask_load_address
[4];
3416 fmask_load_address
[0] = coord_x
;
3417 fmask_load_address
[1] = coord_y
;
3419 fmask_load_address
[2] = coord_z
;
3420 fmask_load_address
[3] = LLVMGetUndef(ctx
->i32
);
3423 struct ac_image_args args
= {0};
3425 args
.opcode
= ac_image_load
;
3426 args
.da
= coord_z
? true : false;
3427 args
.resource
= fmask_desc_ptr
;
3429 args
.addr
= ac_build_gather_values(ctx
, fmask_load_address
, coord_z
? 4 : 2);
3431 res
= ac_build_image_opcode(ctx
, &args
);
3433 res
= ac_to_integer(ctx
, res
);
3434 LLVMValueRef four
= LLVMConstInt(ctx
->i32
, 4, false);
3435 LLVMValueRef F
= LLVMConstInt(ctx
->i32
, 0xf, false);
3437 LLVMValueRef fmask
= LLVMBuildExtractElement(ctx
->builder
,
3441 LLVMValueRef sample_index4
=
3442 LLVMBuildMul(ctx
->builder
, sample_index
, four
, "");
3443 LLVMValueRef shifted_fmask
=
3444 LLVMBuildLShr(ctx
->builder
, fmask
, sample_index4
, "");
3445 LLVMValueRef final_sample
=
3446 LLVMBuildAnd(ctx
->builder
, shifted_fmask
, F
, "");
3448 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
3449 * resource descriptor is 0 (invalid),
3451 LLVMValueRef fmask_desc
=
3452 LLVMBuildBitCast(ctx
->builder
, fmask_desc_ptr
,
3455 LLVMValueRef fmask_word1
=
3456 LLVMBuildExtractElement(ctx
->builder
, fmask_desc
,
3459 LLVMValueRef word1_is_nonzero
=
3460 LLVMBuildICmp(ctx
->builder
, LLVMIntNE
,
3461 fmask_word1
, ctx
->i32_0
, "");
3463 /* Replace the MSAA sample index. */
3465 LLVMBuildSelect(ctx
->builder
, word1_is_nonzero
,
3466 final_sample
, sample_index
, "");
3467 return sample_index
;
3470 static LLVMValueRef
get_image_coords(struct ac_nir_context
*ctx
,
3471 const nir_intrinsic_instr
*instr
)
3473 const struct glsl_type
*type
= glsl_without_array(instr
->variables
[0]->var
->type
);
3475 LLVMValueRef src0
= get_src(ctx
, instr
->src
[0]);
3476 LLVMValueRef coords
[4];
3477 LLVMValueRef masks
[] = {
3478 LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
3479 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
3482 LLVMValueRef sample_index
= ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[1]), 0);
3485 enum glsl_sampler_dim dim
= glsl_get_sampler_dim(type
);
3486 bool is_array
= glsl_sampler_type_is_array(type
);
3487 bool add_frag_pos
= (dim
== GLSL_SAMPLER_DIM_SUBPASS
||
3488 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
3489 bool is_ms
= (dim
== GLSL_SAMPLER_DIM_MS
||
3490 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
3491 bool gfx9_1d
= ctx
->ac
.chip_class
>= GFX9
&& dim
== GLSL_SAMPLER_DIM_1D
;
3492 count
= image_type_to_components_count(dim
, is_array
);
3495 LLVMValueRef fmask_load_address
[3];
3498 fmask_load_address
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
3499 fmask_load_address
[1] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[1], "");
3501 fmask_load_address
[2] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[2], "");
3503 fmask_load_address
[2] = NULL
;
3505 for (chan
= 0; chan
< 2; ++chan
)
3506 fmask_load_address
[chan
] =
3507 LLVMBuildAdd(ctx
->ac
.builder
, fmask_load_address
[chan
],
3508 LLVMBuildFPToUI(ctx
->ac
.builder
, ctx
->abi
->frag_pos
[chan
],
3509 ctx
->ac
.i32
, ""), "");
3510 fmask_load_address
[2] = ac_to_integer(&ctx
->ac
, ctx
->abi
->inputs
[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)]);
3512 sample_index
= adjust_sample_index_using_fmask(&ctx
->ac
,
3513 fmask_load_address
[0],
3514 fmask_load_address
[1],
3515 fmask_load_address
[2],
3517 get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_FMASK
, NULL
, true, false));
3519 if (count
== 1 && !gfx9_1d
) {
3520 if (instr
->src
[0].ssa
->num_components
)
3521 res
= LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
3528 for (chan
= 0; chan
< count
; ++chan
) {
3529 coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src0
, chan
);
3532 for (chan
= 0; chan
< 2; ++chan
)
3533 coords
[chan
] = LLVMBuildAdd(ctx
->ac
.builder
, coords
[chan
], LLVMBuildFPToUI(ctx
->ac
.builder
, ctx
->abi
->frag_pos
[chan
],
3534 ctx
->ac
.i32
, ""), "");
3535 coords
[2] = ac_to_integer(&ctx
->ac
, ctx
->abi
->inputs
[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)]);
3541 coords
[2] = coords
[1];
3542 coords
[1] = ctx
->ac
.i32_0
;
3544 coords
[1] = ctx
->ac
.i32_0
;
3549 coords
[count
] = sample_index
;
3554 coords
[3] = LLVMGetUndef(ctx
->ac
.i32
);
3557 res
= ac_build_gather_values(&ctx
->ac
, coords
, count
);
3562 static LLVMValueRef
visit_image_load(struct ac_nir_context
*ctx
,
3563 const nir_intrinsic_instr
*instr
)
3565 LLVMValueRef params
[7];
3567 char intrinsic_name
[64];
3568 const nir_variable
*var
= instr
->variables
[0]->var
;
3569 const struct glsl_type
*type
= var
->type
;
3571 if(instr
->variables
[0]->deref
.child
)
3572 type
= instr
->variables
[0]->deref
.child
->type
;
3574 type
= glsl_without_array(type
);
3576 const enum glsl_sampler_dim dim
= glsl_get_sampler_dim(type
);
3577 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
3578 unsigned mask
= nir_ssa_def_components_read(&instr
->dest
.ssa
);
3579 unsigned num_channels
= util_last_bit(mask
);
3580 LLVMValueRef rsrc
, vindex
;
3582 rsrc
= get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_BUFFER
, NULL
, true, false);
3583 vindex
= LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[0]),
3586 /* TODO: set "glc" and "can_speculate" when OpenGL needs it. */
3587 res
= ac_build_buffer_load_format(&ctx
->ac
, rsrc
, vindex
,
3588 ctx
->ac
.i32_0
, num_channels
,
3590 res
= ac_build_expand_to_vec4(&ctx
->ac
, res
, num_channels
);
3592 res
= ac_trim_vector(&ctx
->ac
, res
, instr
->dest
.ssa
.num_components
);
3593 res
= ac_to_integer(&ctx
->ac
, res
);
3595 LLVMValueRef da
= glsl_is_array_image(type
) ? ctx
->ac
.i1true
: ctx
->ac
.i1false
;
3596 LLVMValueRef slc
= ctx
->ac
.i1false
;
3598 params
[0] = get_image_coords(ctx
, instr
);
3599 params
[1] = get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_IMAGE
, NULL
, true, false);
3600 params
[2] = LLVMConstInt(ctx
->ac
.i32
, 15, false); /* dmask */
3601 params
[3] = (var
->data
.image
._volatile
|| var
->data
.image
.coherent
) ?
3602 ctx
->ac
.i1true
: ctx
->ac
.i1false
;
3604 params
[5] = ctx
->ac
.i1false
;
3607 ac_get_image_intr_name("llvm.amdgcn.image.load",
3608 ctx
->ac
.v4f32
, /* vdata */
3609 LLVMTypeOf(params
[0]), /* coords */
3610 LLVMTypeOf(params
[1]), /* rsrc */
3611 intrinsic_name
, sizeof(intrinsic_name
));
3613 res
= ac_build_intrinsic(&ctx
->ac
, intrinsic_name
, ctx
->ac
.v4f32
,
3614 params
, 7, AC_FUNC_ATTR_READONLY
);
3616 return ac_to_integer(&ctx
->ac
, res
);
3619 static void visit_image_store(struct ac_nir_context
*ctx
,
3620 nir_intrinsic_instr
*instr
)
3622 LLVMValueRef params
[8];
3623 char intrinsic_name
[64];
3624 const nir_variable
*var
= instr
->variables
[0]->var
;
3625 const struct glsl_type
*type
= glsl_without_array(var
->type
);
3626 const enum glsl_sampler_dim dim
= glsl_get_sampler_dim(type
);
3627 LLVMValueRef glc
= ctx
->ac
.i1false
;
3628 bool force_glc
= ctx
->ac
.chip_class
== SI
;
3630 glc
= ctx
->ac
.i1true
;
3632 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
3633 params
[0] = ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[2])); /* data */
3634 params
[1] = get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_BUFFER
, NULL
, true, true);
3635 params
[2] = LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[0]),
3636 ctx
->ac
.i32_0
, ""); /* vindex */
3637 params
[3] = ctx
->ac
.i32_0
; /* voffset */
3638 params
[4] = glc
; /* glc */
3639 params
[5] = ctx
->ac
.i1false
; /* slc */
3640 ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.buffer.store.format.v4f32", ctx
->ac
.voidt
,
3643 LLVMValueRef da
= glsl_is_array_image(type
) ? ctx
->ac
.i1true
: ctx
->ac
.i1false
;
3644 LLVMValueRef slc
= ctx
->ac
.i1false
;
3646 params
[0] = ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[2]));
3647 params
[1] = get_image_coords(ctx
, instr
); /* coords */
3648 params
[2] = get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_IMAGE
, NULL
, true, true);
3649 params
[3] = LLVMConstInt(ctx
->ac
.i32
, 15, false); /* dmask */
3650 params
[4] = (force_glc
|| var
->data
.image
._volatile
|| var
->data
.image
.coherent
) ?
3651 ctx
->ac
.i1true
: ctx
->ac
.i1false
;
3653 params
[6] = ctx
->ac
.i1false
;
3656 ac_get_image_intr_name("llvm.amdgcn.image.store",
3657 LLVMTypeOf(params
[0]), /* vdata */
3658 LLVMTypeOf(params
[1]), /* coords */
3659 LLVMTypeOf(params
[2]), /* rsrc */
3660 intrinsic_name
, sizeof(intrinsic_name
));
3662 ac_build_intrinsic(&ctx
->ac
, intrinsic_name
, ctx
->ac
.voidt
,
3668 static LLVMValueRef
visit_image_atomic(struct ac_nir_context
*ctx
,
3669 const nir_intrinsic_instr
*instr
)
3671 LLVMValueRef params
[7];
3672 int param_count
= 0;
3673 const nir_variable
*var
= instr
->variables
[0]->var
;
3675 const char *atomic_name
;
3676 char intrinsic_name
[41];
3677 const struct glsl_type
*type
= glsl_without_array(var
->type
);
3678 MAYBE_UNUSED
int length
;
3680 bool is_unsigned
= glsl_get_sampler_result_type(type
) == GLSL_TYPE_UINT
;
3682 switch (instr
->intrinsic
) {
3683 case nir_intrinsic_image_atomic_add
:
3684 atomic_name
= "add";
3686 case nir_intrinsic_image_atomic_min
:
3687 atomic_name
= is_unsigned
? "umin" : "smin";
3689 case nir_intrinsic_image_atomic_max
:
3690 atomic_name
= is_unsigned
? "umax" : "smax";
3692 case nir_intrinsic_image_atomic_and
:
3693 atomic_name
= "and";
3695 case nir_intrinsic_image_atomic_or
:
3698 case nir_intrinsic_image_atomic_xor
:
3699 atomic_name
= "xor";
3701 case nir_intrinsic_image_atomic_exchange
:
3702 atomic_name
= "swap";
3704 case nir_intrinsic_image_atomic_comp_swap
:
3705 atomic_name
= "cmpswap";
3711 if (instr
->intrinsic
== nir_intrinsic_image_atomic_comp_swap
)
3712 params
[param_count
++] = get_src(ctx
, instr
->src
[3]);
3713 params
[param_count
++] = get_src(ctx
, instr
->src
[2]);
3715 if (glsl_get_sampler_dim(type
) == GLSL_SAMPLER_DIM_BUF
) {
3716 params
[param_count
++] = get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_BUFFER
,
3718 params
[param_count
++] = LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[0]),
3719 ctx
->ac
.i32_0
, ""); /* vindex */
3720 params
[param_count
++] = ctx
->ac
.i32_0
; /* voffset */
3721 params
[param_count
++] = ctx
->ac
.i1false
; /* slc */
3723 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3724 "llvm.amdgcn.buffer.atomic.%s", atomic_name
);
3726 char coords_type
[8];
3728 LLVMValueRef coords
= params
[param_count
++] = get_image_coords(ctx
, instr
);
3729 params
[param_count
++] = get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_IMAGE
,
3731 params
[param_count
++] = ctx
->ac
.i1false
; /* r128 */
3732 params
[param_count
++] = glsl_is_array_image(type
) ? ctx
->ac
.i1true
: ctx
->ac
.i1false
; /* da */
3733 params
[param_count
++] = ctx
->ac
.i1false
; /* slc */
3735 build_int_type_name(LLVMTypeOf(coords
),
3736 coords_type
, sizeof(coords_type
));
3738 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3739 "llvm.amdgcn.image.atomic.%s.%s", atomic_name
, coords_type
);
3742 assert(length
< sizeof(intrinsic_name
));
3743 return ac_build_intrinsic(&ctx
->ac
, intrinsic_name
, ctx
->ac
.i32
, params
, param_count
, 0);
3746 static LLVMValueRef
visit_image_samples(struct ac_nir_context
*ctx
,
3747 const nir_intrinsic_instr
*instr
)
3749 const nir_variable
*var
= instr
->variables
[0]->var
;
3750 const struct glsl_type
*type
= glsl_without_array(var
->type
);
3752 struct ac_image_args args
= { 0 };
3753 args
.da
= glsl_is_array_image(type
);
3755 args
.resource
= get_sampler_desc(ctx
, instr
->variables
[0],
3756 AC_DESC_IMAGE
, NULL
, true, false);
3757 args
.opcode
= ac_image_get_resinfo
;
3758 args
.addr
= ctx
->ac
.i32_0
;
3760 return ac_build_image_opcode(&ctx
->ac
, &args
);
3763 static LLVMValueRef
visit_image_size(struct ac_nir_context
*ctx
,
3764 const nir_intrinsic_instr
*instr
)
3767 const nir_variable
*var
= instr
->variables
[0]->var
;
3768 const struct glsl_type
*type
= glsl_without_array(var
->type
);
3770 if (glsl_get_sampler_dim(type
) == GLSL_SAMPLER_DIM_BUF
)
3771 return get_buffer_size(ctx
,
3772 get_sampler_desc(ctx
, instr
->variables
[0],
3773 AC_DESC_BUFFER
, NULL
, true, false), true);
3775 struct ac_image_args args
= { 0 };
3777 args
.da
= glsl_is_array_image(type
);
3779 args
.resource
= get_sampler_desc(ctx
, instr
->variables
[0], AC_DESC_IMAGE
, NULL
, true, false);
3780 args
.opcode
= ac_image_get_resinfo
;
3781 args
.addr
= ctx
->ac
.i32_0
;
3783 res
= ac_build_image_opcode(&ctx
->ac
, &args
);
3785 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
3787 if (glsl_get_sampler_dim(type
) == GLSL_SAMPLER_DIM_CUBE
&&
3788 glsl_sampler_type_is_array(type
)) {
3789 LLVMValueRef six
= LLVMConstInt(ctx
->ac
.i32
, 6, false);
3790 LLVMValueRef z
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
, two
, "");
3791 z
= LLVMBuildSDiv(ctx
->ac
.builder
, z
, six
, "");
3792 res
= LLVMBuildInsertElement(ctx
->ac
.builder
, res
, z
, two
, "");
3794 if (ctx
->ac
.chip_class
>= GFX9
&&
3795 glsl_get_sampler_dim(type
) == GLSL_SAMPLER_DIM_1D
&&
3796 glsl_sampler_type_is_array(type
)) {
3797 LLVMValueRef layers
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
, two
, "");
3798 res
= LLVMBuildInsertElement(ctx
->ac
.builder
, res
, layers
,
3805 #define NOOP_WAITCNT 0xf7f
3806 #define LGKM_CNT 0x07f
3807 #define VM_CNT 0xf70
3809 static void emit_membar(struct ac_llvm_context
*ac
,
3810 const nir_intrinsic_instr
*instr
)
3812 unsigned waitcnt
= NOOP_WAITCNT
;
3814 switch (instr
->intrinsic
) {
3815 case nir_intrinsic_memory_barrier
:
3816 case nir_intrinsic_group_memory_barrier
:
3817 waitcnt
&= VM_CNT
& LGKM_CNT
;
3819 case nir_intrinsic_memory_barrier_atomic_counter
:
3820 case nir_intrinsic_memory_barrier_buffer
:
3821 case nir_intrinsic_memory_barrier_image
:
3824 case nir_intrinsic_memory_barrier_shared
:
3825 waitcnt
&= LGKM_CNT
;
3830 if (waitcnt
!= NOOP_WAITCNT
)
3831 ac_build_waitcnt(ac
, waitcnt
);
3834 static void emit_barrier(struct ac_llvm_context
*ac
, gl_shader_stage stage
)
3836 /* SI only (thanks to a hw bug workaround):
3837 * The real barrier instruction isn’t needed, because an entire patch
3838 * always fits into a single wave.
3840 if (ac
->chip_class
== SI
&& stage
== MESA_SHADER_TESS_CTRL
) {
3841 ac_build_waitcnt(ac
, LGKM_CNT
& VM_CNT
);
3844 ac_build_intrinsic(ac
, "llvm.amdgcn.s.barrier",
3845 ac
->voidt
, NULL
, 0, AC_FUNC_ATTR_CONVERGENT
);
3848 static void radv_emit_kill(struct ac_shader_abi
*abi
, LLVMValueRef visible
)
3850 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
3851 ac_build_kill_if_false(&ctx
->ac
, visible
);
3854 static void emit_discard(struct ac_nir_context
*ctx
,
3855 const nir_intrinsic_instr
*instr
)
3859 if (instr
->intrinsic
== nir_intrinsic_discard_if
) {
3860 cond
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
,
3861 get_src(ctx
, instr
->src
[0]),
3864 assert(instr
->intrinsic
== nir_intrinsic_discard
);
3865 cond
= LLVMConstInt(ctx
->ac
.i1
, false, 0);
3868 ctx
->abi
->emit_kill(ctx
->abi
, cond
);
3872 visit_load_helper_invocation(struct ac_nir_context
*ctx
)
3874 LLVMValueRef result
= ac_build_intrinsic(&ctx
->ac
,
3875 "llvm.amdgcn.ps.live",
3876 ctx
->ac
.i1
, NULL
, 0,
3877 AC_FUNC_ATTR_READNONE
);
3878 result
= LLVMBuildNot(ctx
->ac
.builder
, result
, "");
3879 return LLVMBuildSExt(ctx
->ac
.builder
, result
, ctx
->ac
.i32
, "");
3883 visit_load_local_invocation_index(struct ac_nir_context
*ctx
)
3885 LLVMValueRef result
;
3886 LLVMValueRef thread_id
= ac_get_thread_id(&ctx
->ac
);
3887 result
= LLVMBuildAnd(ctx
->ac
.builder
, ctx
->abi
->tg_size
,
3888 LLVMConstInt(ctx
->ac
.i32
, 0xfc0, false), "");
3890 return LLVMBuildAdd(ctx
->ac
.builder
, result
, thread_id
, "");
3894 visit_load_subgroup_id(struct ac_nir_context
*ctx
)
3896 if (ctx
->stage
== MESA_SHADER_COMPUTE
) {
3897 LLVMValueRef result
;
3898 result
= LLVMBuildAnd(ctx
->ac
.builder
, ctx
->abi
->tg_size
,
3899 LLVMConstInt(ctx
->ac
.i32
, 0xfc0, false), "");
3900 return LLVMBuildLShr(ctx
->ac
.builder
, result
, LLVMConstInt(ctx
->ac
.i32
, 6, false), "");
3902 return LLVMConstInt(ctx
->ac
.i32
, 0, false);
3907 visit_load_num_subgroups(struct ac_nir_context
*ctx
)
3909 if (ctx
->stage
== MESA_SHADER_COMPUTE
) {
3910 return LLVMBuildAnd(ctx
->ac
.builder
, ctx
->abi
->tg_size
,
3911 LLVMConstInt(ctx
->ac
.i32
, 0x3f, false), "");
3913 return LLVMConstInt(ctx
->ac
.i32
, 1, false);
3918 visit_first_invocation(struct ac_nir_context
*ctx
)
3920 LLVMValueRef active_set
= ac_build_ballot(&ctx
->ac
, ctx
->ac
.i32_1
);
3922 /* The second argument is whether cttz(0) should be defined, but we do not care. */
3923 LLVMValueRef args
[] = {active_set
, LLVMConstInt(ctx
->ac
.i1
, 0, false)};
3924 LLVMValueRef result
= ac_build_intrinsic(&ctx
->ac
,
3926 ctx
->ac
.i64
, args
, 2,
3927 AC_FUNC_ATTR_NOUNWIND
|
3928 AC_FUNC_ATTR_READNONE
);
3930 return LLVMBuildTrunc(ctx
->ac
.builder
, result
, ctx
->ac
.i32
, "");
3934 visit_load_shared(struct ac_nir_context
*ctx
,
3935 const nir_intrinsic_instr
*instr
)
3937 LLVMValueRef values
[4], derived_ptr
, index
, ret
;
3939 LLVMValueRef ptr
= get_memory_ptr(ctx
, instr
->src
[0]);
3941 for (int chan
= 0; chan
< instr
->num_components
; chan
++) {
3942 index
= LLVMConstInt(ctx
->ac
.i32
, chan
, 0);
3943 derived_ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ptr
, &index
, 1, "");
3944 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
, derived_ptr
, "");
3947 ret
= ac_build_gather_values(&ctx
->ac
, values
, instr
->num_components
);
3948 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
3952 visit_store_shared(struct ac_nir_context
*ctx
,
3953 const nir_intrinsic_instr
*instr
)
3955 LLVMValueRef derived_ptr
, data
,index
;
3956 LLVMBuilderRef builder
= ctx
->ac
.builder
;
3958 LLVMValueRef ptr
= get_memory_ptr(ctx
, instr
->src
[1]);
3959 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
3961 int writemask
= nir_intrinsic_write_mask(instr
);
3962 for (int chan
= 0; chan
< 4; chan
++) {
3963 if (!(writemask
& (1 << chan
))) {
3966 data
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
);
3967 index
= LLVMConstInt(ctx
->ac
.i32
, chan
, 0);
3968 derived_ptr
= LLVMBuildGEP(builder
, ptr
, &index
, 1, "");
3969 LLVMBuildStore(builder
, data
, derived_ptr
);
3973 static LLVMValueRef
visit_var_atomic(struct ac_nir_context
*ctx
,
3974 const nir_intrinsic_instr
*instr
,
3975 LLVMValueRef ptr
, int src_idx
)
3977 LLVMValueRef result
;
3978 LLVMValueRef src
= get_src(ctx
, instr
->src
[src_idx
]);
3980 if (instr
->intrinsic
== nir_intrinsic_var_atomic_comp_swap
||
3981 instr
->intrinsic
== nir_intrinsic_shared_atomic_comp_swap
) {
3982 LLVMValueRef src1
= get_src(ctx
, instr
->src
[src_idx
+ 1]);
3983 result
= LLVMBuildAtomicCmpXchg(ctx
->ac
.builder
,
3985 LLVMAtomicOrderingSequentiallyConsistent
,
3986 LLVMAtomicOrderingSequentiallyConsistent
,
3989 LLVMAtomicRMWBinOp op
;
3990 switch (instr
->intrinsic
) {
3991 case nir_intrinsic_var_atomic_add
:
3992 case nir_intrinsic_shared_atomic_add
:
3993 op
= LLVMAtomicRMWBinOpAdd
;
3995 case nir_intrinsic_var_atomic_umin
:
3996 case nir_intrinsic_shared_atomic_umin
:
3997 op
= LLVMAtomicRMWBinOpUMin
;
3999 case nir_intrinsic_var_atomic_umax
:
4000 case nir_intrinsic_shared_atomic_umax
:
4001 op
= LLVMAtomicRMWBinOpUMax
;
4003 case nir_intrinsic_var_atomic_imin
:
4004 case nir_intrinsic_shared_atomic_imin
:
4005 op
= LLVMAtomicRMWBinOpMin
;
4007 case nir_intrinsic_var_atomic_imax
:
4008 case nir_intrinsic_shared_atomic_imax
:
4009 op
= LLVMAtomicRMWBinOpMax
;
4011 case nir_intrinsic_var_atomic_and
:
4012 case nir_intrinsic_shared_atomic_and
:
4013 op
= LLVMAtomicRMWBinOpAnd
;
4015 case nir_intrinsic_var_atomic_or
:
4016 case nir_intrinsic_shared_atomic_or
:
4017 op
= LLVMAtomicRMWBinOpOr
;
4019 case nir_intrinsic_var_atomic_xor
:
4020 case nir_intrinsic_shared_atomic_xor
:
4021 op
= LLVMAtomicRMWBinOpXor
;
4023 case nir_intrinsic_var_atomic_exchange
:
4024 case nir_intrinsic_shared_atomic_exchange
:
4025 op
= LLVMAtomicRMWBinOpXchg
;
4031 result
= LLVMBuildAtomicRMW(ctx
->ac
.builder
, op
, ptr
, ac_to_integer(&ctx
->ac
, src
),
4032 LLVMAtomicOrderingSequentiallyConsistent
,
4038 static LLVMValueRef
lookup_interp_param(struct ac_shader_abi
*abi
,
4039 enum glsl_interp_mode interp
, unsigned location
)
4041 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4044 case INTERP_MODE_FLAT
:
4047 case INTERP_MODE_SMOOTH
:
4048 case INTERP_MODE_NONE
:
4049 if (location
== INTERP_CENTER
)
4050 return ctx
->persp_center
;
4051 else if (location
== INTERP_CENTROID
)
4052 return ctx
->persp_centroid
;
4053 else if (location
== INTERP_SAMPLE
)
4054 return ctx
->persp_sample
;
4056 case INTERP_MODE_NOPERSPECTIVE
:
4057 if (location
== INTERP_CENTER
)
4058 return ctx
->linear_center
;
4059 else if (location
== INTERP_CENTROID
)
4060 return ctx
->linear_centroid
;
4061 else if (location
== INTERP_SAMPLE
)
4062 return ctx
->linear_sample
;
4068 static LLVMValueRef
load_sample_position(struct ac_shader_abi
*abi
,
4069 LLVMValueRef sample_id
)
4071 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4073 LLVMValueRef result
;
4074 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->ring_offsets
, LLVMConstInt(ctx
->ac
.i32
, RING_PS_SAMPLE_POSITIONS
, false));
4076 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4077 ac_array_in_const_addr_space(ctx
->ac
.v2f32
), "");
4079 sample_id
= LLVMBuildAdd(ctx
->ac
.builder
, sample_id
, ctx
->sample_pos_offset
, "");
4080 result
= ac_build_load_invariant(&ctx
->ac
, ptr
, sample_id
);
4085 static LLVMValueRef
load_sample_pos(struct ac_nir_context
*ctx
)
4087 LLVMValueRef values
[2];
4088 LLVMValueRef pos
[2];
4090 pos
[0] = ac_to_float(&ctx
->ac
, ctx
->abi
->frag_pos
[0]);
4091 pos
[1] = ac_to_float(&ctx
->ac
, ctx
->abi
->frag_pos
[1]);
4093 values
[0] = ac_build_fract(&ctx
->ac
, pos
[0], 32);
4094 values
[1] = ac_build_fract(&ctx
->ac
, pos
[1], 32);
4095 return ac_build_gather_values(&ctx
->ac
, values
, 2);
4098 static LLVMValueRef
load_sample_mask_in(struct ac_shader_abi
*abi
)
4100 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4101 uint8_t log2_ps_iter_samples
= ctx
->shader_info
->info
.ps
.force_persample
?
4102 ctx
->options
->key
.fs
.log2_num_samples
:
4103 ctx
->options
->key
.fs
.log2_ps_iter_samples
;
4105 /* The bit pattern matches that used by fixed function fragment
4107 static const uint16_t ps_iter_masks
[] = {
4108 0xffff, /* not used */
4114 assert(log2_ps_iter_samples
< ARRAY_SIZE(ps_iter_masks
));
4116 uint32_t ps_iter_mask
= ps_iter_masks
[log2_ps_iter_samples
];
4118 LLVMValueRef result
, sample_id
;
4119 sample_id
= unpack_param(&ctx
->ac
, abi
->ancillary
, 8, 4);
4120 sample_id
= LLVMBuildShl(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, ps_iter_mask
, false), sample_id
, "");
4121 result
= LLVMBuildAnd(ctx
->ac
.builder
, sample_id
, abi
->sample_coverage
, "");
4125 static LLVMValueRef
visit_interp(struct ac_nir_context
*ctx
,
4126 const nir_intrinsic_instr
*instr
)
4128 LLVMValueRef result
[4];
4129 LLVMValueRef interp_param
, attr_number
;
4132 LLVMValueRef src_c0
= NULL
;
4133 LLVMValueRef src_c1
= NULL
;
4134 LLVMValueRef src0
= NULL
;
4135 int input_index
= instr
->variables
[0]->var
->data
.location
- VARYING_SLOT_VAR0
;
4136 switch (instr
->intrinsic
) {
4137 case nir_intrinsic_interp_var_at_centroid
:
4138 location
= INTERP_CENTROID
;
4140 case nir_intrinsic_interp_var_at_sample
:
4141 case nir_intrinsic_interp_var_at_offset
:
4142 location
= INTERP_CENTER
;
4143 src0
= get_src(ctx
, instr
->src
[0]);
4149 if (instr
->intrinsic
== nir_intrinsic_interp_var_at_offset
) {
4150 src_c0
= ac_to_float(&ctx
->ac
, LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, ctx
->ac
.i32_0
, ""));
4151 src_c1
= ac_to_float(&ctx
->ac
, LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, ctx
->ac
.i32_1
, ""));
4152 } else if (instr
->intrinsic
== nir_intrinsic_interp_var_at_sample
) {
4153 LLVMValueRef sample_position
;
4154 LLVMValueRef halfval
= LLVMConstReal(ctx
->ac
.f32
, 0.5f
);
4156 /* fetch sample ID */
4157 sample_position
= ctx
->abi
->load_sample_position(ctx
->abi
, src0
);
4159 src_c0
= LLVMBuildExtractElement(ctx
->ac
.builder
, sample_position
, ctx
->ac
.i32_0
, "");
4160 src_c0
= LLVMBuildFSub(ctx
->ac
.builder
, src_c0
, halfval
, "");
4161 src_c1
= LLVMBuildExtractElement(ctx
->ac
.builder
, sample_position
, ctx
->ac
.i32_1
, "");
4162 src_c1
= LLVMBuildFSub(ctx
->ac
.builder
, src_c1
, halfval
, "");
4164 interp_param
= ctx
->abi
->lookup_interp_param(ctx
->abi
, instr
->variables
[0]->var
->data
.interpolation
, location
);
4165 attr_number
= LLVMConstInt(ctx
->ac
.i32
, input_index
, false);
4167 if (location
== INTERP_CENTER
) {
4168 LLVMValueRef ij_out
[2];
4169 LLVMValueRef ddxy_out
= emit_ddxy_interp(ctx
, interp_param
);
4172 * take the I then J parameters, and the DDX/Y for it, and
4173 * calculate the IJ inputs for the interpolator.
4174 * temp1 = ddx * offset/sample.x + I;
4175 * interp_param.I = ddy * offset/sample.y + temp1;
4176 * temp1 = ddx * offset/sample.x + J;
4177 * interp_param.J = ddy * offset/sample.y + temp1;
4179 for (unsigned i
= 0; i
< 2; i
++) {
4180 LLVMValueRef ix_ll
= LLVMConstInt(ctx
->ac
.i32
, i
, false);
4181 LLVMValueRef iy_ll
= LLVMConstInt(ctx
->ac
.i32
, i
+ 2, false);
4182 LLVMValueRef ddx_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
4183 ddxy_out
, ix_ll
, "");
4184 LLVMValueRef ddy_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
4185 ddxy_out
, iy_ll
, "");
4186 LLVMValueRef interp_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
4187 interp_param
, ix_ll
, "");
4188 LLVMValueRef temp1
, temp2
;
4190 interp_el
= LLVMBuildBitCast(ctx
->ac
.builder
, interp_el
,
4193 temp1
= LLVMBuildFMul(ctx
->ac
.builder
, ddx_el
, src_c0
, "");
4194 temp1
= LLVMBuildFAdd(ctx
->ac
.builder
, temp1
, interp_el
, "");
4196 temp2
= LLVMBuildFMul(ctx
->ac
.builder
, ddy_el
, src_c1
, "");
4197 temp2
= LLVMBuildFAdd(ctx
->ac
.builder
, temp2
, temp1
, "");
4199 ij_out
[i
] = LLVMBuildBitCast(ctx
->ac
.builder
,
4200 temp2
, ctx
->ac
.i32
, "");
4202 interp_param
= ac_build_gather_values(&ctx
->ac
, ij_out
, 2);
4206 for (chan
= 0; chan
< 4; chan
++) {
4207 LLVMValueRef llvm_chan
= LLVMConstInt(ctx
->ac
.i32
, chan
, false);
4210 interp_param
= LLVMBuildBitCast(ctx
->ac
.builder
,
4211 interp_param
, ctx
->ac
.v2f32
, "");
4212 LLVMValueRef i
= LLVMBuildExtractElement(
4213 ctx
->ac
.builder
, interp_param
, ctx
->ac
.i32_0
, "");
4214 LLVMValueRef j
= LLVMBuildExtractElement(
4215 ctx
->ac
.builder
, interp_param
, ctx
->ac
.i32_1
, "");
4217 result
[chan
] = ac_build_fs_interp(&ctx
->ac
,
4218 llvm_chan
, attr_number
,
4219 ctx
->abi
->prim_mask
, i
, j
);
4221 result
[chan
] = ac_build_fs_interp_mov(&ctx
->ac
,
4222 LLVMConstInt(ctx
->ac
.i32
, 2, false),
4223 llvm_chan
, attr_number
,
4224 ctx
->abi
->prim_mask
);
4227 return ac_build_varying_gather_values(&ctx
->ac
, result
, instr
->num_components
,
4228 instr
->variables
[0]->var
->data
.location_frac
);
4232 visit_emit_vertex(struct ac_shader_abi
*abi
, unsigned stream
, LLVMValueRef
*addrs
)
4234 LLVMValueRef gs_next_vertex
;
4235 LLVMValueRef can_emit
;
4237 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4239 assert(stream
== 0);
4241 /* Write vertex attribute values to GSVS ring */
4242 gs_next_vertex
= LLVMBuildLoad(ctx
->ac
.builder
,
4243 ctx
->gs_next_vertex
,
4246 /* If this thread has already emitted the declared maximum number of
4247 * vertices, kill it: excessive vertex emissions are not supposed to
4248 * have any effect, and GS threads have no externally observable
4249 * effects other than emitting vertices.
4251 can_emit
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntULT
, gs_next_vertex
,
4252 LLVMConstInt(ctx
->ac
.i32
, ctx
->gs_max_out_vertices
, false), "");
4253 ac_build_kill_if_false(&ctx
->ac
, can_emit
);
4255 /* loop num outputs */
4257 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
4258 LLVMValueRef
*out_ptr
= &addrs
[i
* 4];
4263 if (!(ctx
->output_mask
& (1ull << i
)))
4266 if (i
== VARYING_SLOT_CLIP_DIST0
) {
4267 /* pack clip and cull into a single set of slots */
4268 length
= ctx
->num_output_clips
+ ctx
->num_output_culls
;
4272 for (unsigned j
= 0; j
< length
; j
++) {
4273 LLVMValueRef out_val
= LLVMBuildLoad(ctx
->ac
.builder
,
4275 LLVMValueRef voffset
= LLVMConstInt(ctx
->ac
.i32
, (slot
* 4 + j
) * ctx
->gs_max_out_vertices
, false);
4276 voffset
= LLVMBuildAdd(ctx
->ac
.builder
, voffset
, gs_next_vertex
, "");
4277 voffset
= LLVMBuildMul(ctx
->ac
.builder
, voffset
, LLVMConstInt(ctx
->ac
.i32
, 4, false), "");
4279 out_val
= LLVMBuildBitCast(ctx
->ac
.builder
, out_val
, ctx
->ac
.i32
, "");
4281 ac_build_buffer_store_dword(&ctx
->ac
, ctx
->gsvs_ring
,
4283 voffset
, ctx
->gs2vs_offset
, 0,
4289 gs_next_vertex
= LLVMBuildAdd(ctx
->ac
.builder
, gs_next_vertex
,
4291 LLVMBuildStore(ctx
->ac
.builder
, gs_next_vertex
, ctx
->gs_next_vertex
);
4293 ac_build_sendmsg(&ctx
->ac
, AC_SENDMSG_GS_OP_EMIT
| AC_SENDMSG_GS
| (0 << 8), ctx
->gs_wave_id
);
4297 visit_end_primitive(struct ac_shader_abi
*abi
, unsigned stream
)
4299 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4300 ac_build_sendmsg(&ctx
->ac
, AC_SENDMSG_GS_OP_CUT
| AC_SENDMSG_GS
| (stream
<< 8), ctx
->gs_wave_id
);
4304 load_tess_coord(struct ac_shader_abi
*abi
)
4306 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4308 LLVMValueRef coord
[4] = {
4315 if (ctx
->tes_primitive_mode
== GL_TRIANGLES
)
4316 coord
[2] = LLVMBuildFSub(ctx
->ac
.builder
, ctx
->ac
.f32_1
,
4317 LLVMBuildFAdd(ctx
->ac
.builder
, coord
[0], coord
[1], ""), "");
4319 return ac_build_gather_values(&ctx
->ac
, coord
, 3);
4323 load_patch_vertices_in(struct ac_shader_abi
*abi
)
4325 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4326 return LLVMConstInt(ctx
->ac
.i32
, ctx
->options
->key
.tcs
.input_vertices
, false);
4329 static void visit_intrinsic(struct ac_nir_context
*ctx
,
4330 nir_intrinsic_instr
*instr
)
4332 LLVMValueRef result
= NULL
;
4334 switch (instr
->intrinsic
) {
4335 case nir_intrinsic_ballot
:
4336 result
= ac_build_ballot(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4338 case nir_intrinsic_read_invocation
:
4339 case nir_intrinsic_read_first_invocation
: {
4340 LLVMValueRef args
[2];
4343 args
[0] = get_src(ctx
, instr
->src
[0]);
4346 const char *intr_name
;
4347 if (instr
->intrinsic
== nir_intrinsic_read_invocation
) {
4349 intr_name
= "llvm.amdgcn.readlane";
4352 args
[1] = get_src(ctx
, instr
->src
[1]);
4355 intr_name
= "llvm.amdgcn.readfirstlane";
4358 /* We currently have no other way to prevent LLVM from lifting the icmp
4359 * calls to a dominating basic block.
4361 ac_build_optimization_barrier(&ctx
->ac
, &args
[0]);
4363 result
= ac_build_intrinsic(&ctx
->ac
, intr_name
,
4364 ctx
->ac
.i32
, args
, num_args
,
4365 AC_FUNC_ATTR_READNONE
|
4366 AC_FUNC_ATTR_CONVERGENT
);
4369 case nir_intrinsic_load_subgroup_invocation
:
4370 result
= ac_get_thread_id(&ctx
->ac
);
4372 case nir_intrinsic_load_work_group_id
: {
4373 LLVMValueRef values
[3];
4375 for (int i
= 0; i
< 3; i
++) {
4376 values
[i
] = ctx
->abi
->workgroup_ids
[i
] ?
4377 ctx
->abi
->workgroup_ids
[i
] : ctx
->ac
.i32_0
;
4380 result
= ac_build_gather_values(&ctx
->ac
, values
, 3);
4383 case nir_intrinsic_load_base_vertex
: {
4384 result
= ctx
->abi
->load_base_vertex(ctx
->abi
);
4387 case nir_intrinsic_load_local_group_size
:
4388 result
= ctx
->abi
->load_local_group_size(ctx
->abi
);
4390 case nir_intrinsic_load_vertex_id
:
4391 result
= LLVMBuildAdd(ctx
->ac
.builder
, ctx
->abi
->vertex_id
,
4392 ctx
->abi
->base_vertex
, "");
4394 case nir_intrinsic_load_vertex_id_zero_base
: {
4395 result
= ctx
->abi
->vertex_id
;
4398 case nir_intrinsic_load_local_invocation_id
: {
4399 result
= ctx
->abi
->local_invocation_ids
;
4402 case nir_intrinsic_load_base_instance
:
4403 result
= ctx
->abi
->start_instance
;
4405 case nir_intrinsic_load_draw_id
:
4406 result
= ctx
->abi
->draw_id
;
4408 case nir_intrinsic_load_view_index
:
4409 result
= ctx
->abi
->view_index
;
4411 case nir_intrinsic_load_invocation_id
:
4412 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
)
4413 result
= unpack_param(&ctx
->ac
, ctx
->abi
->tcs_rel_ids
, 8, 5);
4415 result
= ctx
->abi
->gs_invocation_id
;
4417 case nir_intrinsic_load_primitive_id
:
4418 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
4419 result
= ctx
->abi
->gs_prim_id
;
4420 } else if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
4421 result
= ctx
->abi
->tcs_patch_id
;
4422 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
4423 result
= ctx
->abi
->tes_patch_id
;
4425 fprintf(stderr
, "Unknown primitive id intrinsic: %d", ctx
->stage
);
4427 case nir_intrinsic_load_sample_id
:
4428 result
= unpack_param(&ctx
->ac
, ctx
->abi
->ancillary
, 8, 4);
4430 case nir_intrinsic_load_sample_pos
:
4431 result
= load_sample_pos(ctx
);
4433 case nir_intrinsic_load_sample_mask_in
:
4434 result
= ctx
->abi
->load_sample_mask_in(ctx
->abi
);
4436 case nir_intrinsic_load_frag_coord
: {
4437 LLVMValueRef values
[4] = {
4438 ctx
->abi
->frag_pos
[0],
4439 ctx
->abi
->frag_pos
[1],
4440 ctx
->abi
->frag_pos
[2],
4441 ac_build_fdiv(&ctx
->ac
, ctx
->ac
.f32_1
, ctx
->abi
->frag_pos
[3])
4443 result
= ac_build_gather_values(&ctx
->ac
, values
, 4);
4446 case nir_intrinsic_load_front_face
:
4447 result
= ctx
->abi
->front_face
;
4449 case nir_intrinsic_load_helper_invocation
:
4450 result
= visit_load_helper_invocation(ctx
);
4452 case nir_intrinsic_load_instance_id
:
4453 result
= ctx
->abi
->instance_id
;
4455 case nir_intrinsic_load_num_work_groups
:
4456 result
= ctx
->abi
->num_work_groups
;
4458 case nir_intrinsic_load_local_invocation_index
:
4459 result
= visit_load_local_invocation_index(ctx
);
4461 case nir_intrinsic_load_subgroup_id
:
4462 result
= visit_load_subgroup_id(ctx
);
4464 case nir_intrinsic_load_num_subgroups
:
4465 result
= visit_load_num_subgroups(ctx
);
4467 case nir_intrinsic_first_invocation
:
4468 result
= visit_first_invocation(ctx
);
4470 case nir_intrinsic_load_push_constant
:
4471 result
= visit_load_push_constant(ctx
, instr
);
4473 case nir_intrinsic_vulkan_resource_index
: {
4474 LLVMValueRef index
= get_src(ctx
, instr
->src
[0]);
4475 unsigned desc_set
= nir_intrinsic_desc_set(instr
);
4476 unsigned binding
= nir_intrinsic_binding(instr
);
4478 result
= ctx
->abi
->load_resource(ctx
->abi
, index
, desc_set
,
4482 case nir_intrinsic_vulkan_resource_reindex
:
4483 result
= visit_vulkan_resource_reindex(ctx
, instr
);
4485 case nir_intrinsic_store_ssbo
:
4486 visit_store_ssbo(ctx
, instr
);
4488 case nir_intrinsic_load_ssbo
:
4489 result
= visit_load_buffer(ctx
, instr
);
4491 case nir_intrinsic_ssbo_atomic_add
:
4492 case nir_intrinsic_ssbo_atomic_imin
:
4493 case nir_intrinsic_ssbo_atomic_umin
:
4494 case nir_intrinsic_ssbo_atomic_imax
:
4495 case nir_intrinsic_ssbo_atomic_umax
:
4496 case nir_intrinsic_ssbo_atomic_and
:
4497 case nir_intrinsic_ssbo_atomic_or
:
4498 case nir_intrinsic_ssbo_atomic_xor
:
4499 case nir_intrinsic_ssbo_atomic_exchange
:
4500 case nir_intrinsic_ssbo_atomic_comp_swap
:
4501 result
= visit_atomic_ssbo(ctx
, instr
);
4503 case nir_intrinsic_load_ubo
:
4504 result
= visit_load_ubo_buffer(ctx
, instr
);
4506 case nir_intrinsic_get_buffer_size
:
4507 result
= visit_get_buffer_size(ctx
, instr
);
4509 case nir_intrinsic_load_var
:
4510 result
= visit_load_var(ctx
, instr
);
4512 case nir_intrinsic_store_var
:
4513 visit_store_var(ctx
, instr
);
4515 case nir_intrinsic_load_shared
:
4516 result
= visit_load_shared(ctx
, instr
);
4518 case nir_intrinsic_store_shared
:
4519 visit_store_shared(ctx
, instr
);
4521 case nir_intrinsic_image_samples
:
4522 result
= visit_image_samples(ctx
, instr
);
4524 case nir_intrinsic_image_load
:
4525 result
= visit_image_load(ctx
, instr
);
4527 case nir_intrinsic_image_store
:
4528 visit_image_store(ctx
, instr
);
4530 case nir_intrinsic_image_atomic_add
:
4531 case nir_intrinsic_image_atomic_min
:
4532 case nir_intrinsic_image_atomic_max
:
4533 case nir_intrinsic_image_atomic_and
:
4534 case nir_intrinsic_image_atomic_or
:
4535 case nir_intrinsic_image_atomic_xor
:
4536 case nir_intrinsic_image_atomic_exchange
:
4537 case nir_intrinsic_image_atomic_comp_swap
:
4538 result
= visit_image_atomic(ctx
, instr
);
4540 case nir_intrinsic_image_size
:
4541 result
= visit_image_size(ctx
, instr
);
4543 case nir_intrinsic_shader_clock
:
4544 result
= ac_build_shader_clock(&ctx
->ac
);
4546 case nir_intrinsic_discard
:
4547 case nir_intrinsic_discard_if
:
4548 emit_discard(ctx
, instr
);
4550 case nir_intrinsic_memory_barrier
:
4551 case nir_intrinsic_group_memory_barrier
:
4552 case nir_intrinsic_memory_barrier_atomic_counter
:
4553 case nir_intrinsic_memory_barrier_buffer
:
4554 case nir_intrinsic_memory_barrier_image
:
4555 case nir_intrinsic_memory_barrier_shared
:
4556 emit_membar(&ctx
->ac
, instr
);
4558 case nir_intrinsic_barrier
:
4559 emit_barrier(&ctx
->ac
, ctx
->stage
);
4561 case nir_intrinsic_shared_atomic_add
:
4562 case nir_intrinsic_shared_atomic_imin
:
4563 case nir_intrinsic_shared_atomic_umin
:
4564 case nir_intrinsic_shared_atomic_imax
:
4565 case nir_intrinsic_shared_atomic_umax
:
4566 case nir_intrinsic_shared_atomic_and
:
4567 case nir_intrinsic_shared_atomic_or
:
4568 case nir_intrinsic_shared_atomic_xor
:
4569 case nir_intrinsic_shared_atomic_exchange
:
4570 case nir_intrinsic_shared_atomic_comp_swap
: {
4571 LLVMValueRef ptr
= get_memory_ptr(ctx
, instr
->src
[0]);
4572 result
= visit_var_atomic(ctx
, instr
, ptr
, 1);
4575 case nir_intrinsic_var_atomic_add
:
4576 case nir_intrinsic_var_atomic_imin
:
4577 case nir_intrinsic_var_atomic_umin
:
4578 case nir_intrinsic_var_atomic_imax
:
4579 case nir_intrinsic_var_atomic_umax
:
4580 case nir_intrinsic_var_atomic_and
:
4581 case nir_intrinsic_var_atomic_or
:
4582 case nir_intrinsic_var_atomic_xor
:
4583 case nir_intrinsic_var_atomic_exchange
:
4584 case nir_intrinsic_var_atomic_comp_swap
: {
4585 LLVMValueRef ptr
= build_gep_for_deref(ctx
, instr
->variables
[0]);
4586 result
= visit_var_atomic(ctx
, instr
, ptr
, 0);
4589 case nir_intrinsic_interp_var_at_centroid
:
4590 case nir_intrinsic_interp_var_at_sample
:
4591 case nir_intrinsic_interp_var_at_offset
:
4592 result
= visit_interp(ctx
, instr
);
4594 case nir_intrinsic_emit_vertex
:
4595 ctx
->abi
->emit_vertex(ctx
->abi
, nir_intrinsic_stream_id(instr
), ctx
->abi
->outputs
);
4597 case nir_intrinsic_end_primitive
:
4598 ctx
->abi
->emit_primitive(ctx
->abi
, nir_intrinsic_stream_id(instr
));
4600 case nir_intrinsic_load_tess_coord
:
4601 result
= ctx
->abi
->load_tess_coord(ctx
->abi
);
4603 case nir_intrinsic_load_tess_level_outer
:
4604 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_OUTER
);
4606 case nir_intrinsic_load_tess_level_inner
:
4607 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_INNER
);
4609 case nir_intrinsic_load_patch_vertices_in
:
4610 result
= ctx
->abi
->load_patch_vertices_in(ctx
->abi
);
4612 case nir_intrinsic_vote_all
: {
4613 LLVMValueRef tmp
= ac_build_vote_all(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4614 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
4617 case nir_intrinsic_vote_any
: {
4618 LLVMValueRef tmp
= ac_build_vote_any(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4619 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
4623 fprintf(stderr
, "Unknown intrinsic: ");
4624 nir_print_instr(&instr
->instr
, stderr
);
4625 fprintf(stderr
, "\n");
4629 _mesa_hash_table_insert(ctx
->defs
, &instr
->dest
.ssa
, result
);
4633 static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi
*abi
)
4635 return abi
->base_vertex
;
4638 static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi
*abi
,
4639 LLVMValueRef buffer_ptr
, bool write
)
4641 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4642 LLVMValueRef result
;
4644 LLVMSetMetadata(buffer_ptr
, ctx
->ac
.uniform_md_kind
, ctx
->ac
.empty_md
);
4646 result
= LLVMBuildLoad(ctx
->ac
.builder
, buffer_ptr
, "");
4647 LLVMSetMetadata(result
, ctx
->ac
.invariant_load_md_kind
, ctx
->ac
.empty_md
);
4652 static LLVMValueRef
radv_load_ubo(struct ac_shader_abi
*abi
, LLVMValueRef buffer_ptr
)
4654 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4655 LLVMValueRef result
;
4657 LLVMSetMetadata(buffer_ptr
, ctx
->ac
.uniform_md_kind
, ctx
->ac
.empty_md
);
4659 result
= LLVMBuildLoad(ctx
->ac
.builder
, buffer_ptr
, "");
4660 LLVMSetMetadata(result
, ctx
->ac
.invariant_load_md_kind
, ctx
->ac
.empty_md
);
4665 static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi
*abi
,
4666 unsigned descriptor_set
,
4667 unsigned base_index
,
4668 unsigned constant_index
,
4670 enum ac_descriptor_type desc_type
,
4671 bool image
, bool write
)
4673 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
4674 LLVMValueRef list
= ctx
->descriptor_sets
[descriptor_set
];
4675 struct radv_descriptor_set_layout
*layout
= ctx
->options
->layout
->set
[descriptor_set
].layout
;
4676 struct radv_descriptor_set_binding_layout
*binding
= layout
->binding
+ base_index
;
4677 unsigned offset
= binding
->offset
;
4678 unsigned stride
= binding
->size
;
4680 LLVMBuilderRef builder
= ctx
->ac
.builder
;
4683 assert(base_index
< layout
->binding_count
);
4685 switch (desc_type
) {
4687 type
= ctx
->ac
.v8i32
;
4691 type
= ctx
->ac
.v8i32
;
4695 case AC_DESC_SAMPLER
:
4696 type
= ctx
->ac
.v4i32
;
4697 if (binding
->type
== VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
)
4702 case AC_DESC_BUFFER
:
4703 type
= ctx
->ac
.v4i32
;
4707 unreachable("invalid desc_type\n");
4710 offset
+= constant_index
* stride
;
4712 if (desc_type
== AC_DESC_SAMPLER
&& binding
->immutable_samplers_offset
&&
4713 (!index
|| binding
->immutable_samplers_equal
)) {
4714 if (binding
->immutable_samplers_equal
)
4717 const uint32_t *samplers
= radv_immutable_samplers(layout
, binding
);
4719 LLVMValueRef constants
[] = {
4720 LLVMConstInt(ctx
->ac
.i32
, samplers
[constant_index
* 4 + 0], 0),
4721 LLVMConstInt(ctx
->ac
.i32
, samplers
[constant_index
* 4 + 1], 0),
4722 LLVMConstInt(ctx
->ac
.i32
, samplers
[constant_index
* 4 + 2], 0),
4723 LLVMConstInt(ctx
->ac
.i32
, samplers
[constant_index
* 4 + 3], 0),
4725 return ac_build_gather_values(&ctx
->ac
, constants
, 4);
4728 assert(stride
% type_size
== 0);
4731 index
= ctx
->ac
.i32_0
;
4733 index
= LLVMBuildMul(builder
, index
, LLVMConstInt(ctx
->ac
.i32
, stride
/ type_size
, 0), "");
4735 list
= ac_build_gep0(&ctx
->ac
, list
, LLVMConstInt(ctx
->ac
.i32
, offset
, 0));
4736 list
= LLVMBuildPointerCast(builder
, list
, ac_array_in_const_addr_space(type
), "");
4738 return ac_build_load_to_sgpr(&ctx
->ac
, list
, index
);
4741 static LLVMValueRef
get_sampler_desc(struct ac_nir_context
*ctx
,
4742 const nir_deref_var
*deref
,
4743 enum ac_descriptor_type desc_type
,
4744 const nir_tex_instr
*tex_instr
,
4745 bool image
, bool write
)
4747 LLVMValueRef index
= NULL
;
4748 unsigned constant_index
= 0;
4749 unsigned descriptor_set
;
4750 unsigned base_index
;
4753 assert(tex_instr
&& !image
);
4755 base_index
= tex_instr
->sampler_index
;
4757 const nir_deref
*tail
= &deref
->deref
;
4758 while (tail
->child
) {
4759 const nir_deref_array
*child
= nir_deref_as_array(tail
->child
);
4760 unsigned array_size
= glsl_get_aoa_size(tail
->child
->type
);
4765 assert(child
->deref_array_type
!= nir_deref_array_type_wildcard
);
4767 if (child
->deref_array_type
== nir_deref_array_type_indirect
) {
4768 LLVMValueRef indirect
= get_src(ctx
, child
->indirect
);
4770 indirect
= LLVMBuildMul(ctx
->ac
.builder
, indirect
,
4771 LLVMConstInt(ctx
->ac
.i32
, array_size
, false), "");
4776 index
= LLVMBuildAdd(ctx
->ac
.builder
, index
, indirect
, "");
4779 constant_index
+= child
->base_offset
* array_size
;
4781 tail
= &child
->deref
;
4783 descriptor_set
= deref
->var
->data
.descriptor_set
;
4784 base_index
= deref
->var
->data
.binding
;
4787 return ctx
->abi
->load_sampler_desc(ctx
->abi
,
4790 constant_index
, index
,
4791 desc_type
, image
, write
);
4794 static void set_tex_fetch_args(struct ac_llvm_context
*ctx
,
4795 struct ac_image_args
*args
,
4796 const nir_tex_instr
*instr
,
4798 LLVMValueRef res_ptr
, LLVMValueRef samp_ptr
,
4799 LLVMValueRef
*param
, unsigned count
,
4802 unsigned is_rect
= 0;
4803 bool da
= instr
->is_array
|| instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
;
4805 if (op
== nir_texop_lod
)
4807 /* Pad to power of two vector */
4808 while (count
< util_next_power_of_two(count
))
4809 param
[count
++] = LLVMGetUndef(ctx
->i32
);
4812 args
->addr
= ac_build_gather_values(ctx
, param
, count
);
4814 args
->addr
= param
[0];
4816 args
->resource
= res_ptr
;
4817 args
->sampler
= samp_ptr
;
4819 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
&& op
== nir_texop_txf
) {
4820 args
->addr
= param
[0];
4824 args
->dmask
= dmask
;
4825 args
->unorm
= is_rect
;
4829 /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
4832 * If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
4833 * filtering manually. The driver sets img7 to a mask clearing
4834 * MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
4835 * s_and_b32 samp0, samp0, img7
4838 * The ANISO_OVERRIDE sampler field enables this fix in TA.
4840 static LLVMValueRef
sici_fix_sampler_aniso(struct ac_nir_context
*ctx
,
4841 LLVMValueRef res
, LLVMValueRef samp
)
4843 LLVMBuilderRef builder
= ctx
->ac
.builder
;
4844 LLVMValueRef img7
, samp0
;
4846 if (ctx
->ac
.chip_class
>= VI
)
4849 img7
= LLVMBuildExtractElement(builder
, res
,
4850 LLVMConstInt(ctx
->ac
.i32
, 7, 0), "");
4851 samp0
= LLVMBuildExtractElement(builder
, samp
,
4852 LLVMConstInt(ctx
->ac
.i32
, 0, 0), "");
4853 samp0
= LLVMBuildAnd(builder
, samp0
, img7
, "");
4854 return LLVMBuildInsertElement(builder
, samp
, samp0
,
4855 LLVMConstInt(ctx
->ac
.i32
, 0, 0), "");
4858 static void tex_fetch_ptrs(struct ac_nir_context
*ctx
,
4859 nir_tex_instr
*instr
,
4860 LLVMValueRef
*res_ptr
, LLVMValueRef
*samp_ptr
,
4861 LLVMValueRef
*fmask_ptr
)
4863 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
)
4864 *res_ptr
= get_sampler_desc(ctx
, instr
->texture
, AC_DESC_BUFFER
, instr
, false, false);
4866 *res_ptr
= get_sampler_desc(ctx
, instr
->texture
, AC_DESC_IMAGE
, instr
, false, false);
4869 *samp_ptr
= get_sampler_desc(ctx
, instr
->sampler
, AC_DESC_SAMPLER
, instr
, false, false);
4871 *samp_ptr
= get_sampler_desc(ctx
, instr
->texture
, AC_DESC_SAMPLER
, instr
, false, false);
4872 if (instr
->sampler_dim
< GLSL_SAMPLER_DIM_RECT
)
4873 *samp_ptr
= sici_fix_sampler_aniso(ctx
, *res_ptr
, *samp_ptr
);
4875 if (fmask_ptr
&& !instr
->sampler
&& (instr
->op
== nir_texop_txf_ms
||
4876 instr
->op
== nir_texop_samples_identical
))
4877 *fmask_ptr
= get_sampler_desc(ctx
, instr
->texture
, AC_DESC_FMASK
, instr
, false, false);
4880 static LLVMValueRef
apply_round_slice(struct ac_llvm_context
*ctx
,
4883 coord
= ac_to_float(ctx
, coord
);
4884 coord
= ac_build_intrinsic(ctx
, "llvm.rint.f32", ctx
->f32
, &coord
, 1, 0);
4885 coord
= ac_to_integer(ctx
, coord
);
4889 static void visit_tex(struct ac_nir_context
*ctx
, nir_tex_instr
*instr
)
4891 LLVMValueRef result
= NULL
;
4892 struct ac_image_args args
= { 0 };
4893 unsigned dmask
= 0xf;
4894 LLVMValueRef address
[16];
4895 LLVMValueRef coords
[5];
4896 LLVMValueRef coord
= NULL
, lod
= NULL
, comparator
= NULL
;
4897 LLVMValueRef bias
= NULL
, offsets
= NULL
;
4898 LLVMValueRef res_ptr
, samp_ptr
, fmask_ptr
= NULL
, sample_index
= NULL
;
4899 LLVMValueRef ddx
= NULL
, ddy
= NULL
;
4900 LLVMValueRef derivs
[6];
4901 unsigned chan
, count
= 0;
4902 unsigned const_src
= 0, num_deriv_comp
= 0;
4903 bool lod_is_zero
= false;
4905 tex_fetch_ptrs(ctx
, instr
, &res_ptr
, &samp_ptr
, &fmask_ptr
);
4907 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
4908 switch (instr
->src
[i
].src_type
) {
4909 case nir_tex_src_coord
:
4910 coord
= get_src(ctx
, instr
->src
[i
].src
);
4912 case nir_tex_src_projector
:
4914 case nir_tex_src_comparator
:
4915 comparator
= get_src(ctx
, instr
->src
[i
].src
);
4917 case nir_tex_src_offset
:
4918 offsets
= get_src(ctx
, instr
->src
[i
].src
);
4921 case nir_tex_src_bias
:
4922 bias
= get_src(ctx
, instr
->src
[i
].src
);
4924 case nir_tex_src_lod
: {
4925 nir_const_value
*val
= nir_src_as_const_value(instr
->src
[i
].src
);
4927 if (val
&& val
->i32
[0] == 0)
4929 lod
= get_src(ctx
, instr
->src
[i
].src
);
4932 case nir_tex_src_ms_index
:
4933 sample_index
= get_src(ctx
, instr
->src
[i
].src
);
4935 case nir_tex_src_ms_mcs
:
4937 case nir_tex_src_ddx
:
4938 ddx
= get_src(ctx
, instr
->src
[i
].src
);
4939 num_deriv_comp
= instr
->src
[i
].src
.ssa
->num_components
;
4941 case nir_tex_src_ddy
:
4942 ddy
= get_src(ctx
, instr
->src
[i
].src
);
4944 case nir_tex_src_texture_offset
:
4945 case nir_tex_src_sampler_offset
:
4946 case nir_tex_src_plane
:
4952 if (instr
->op
== nir_texop_txs
&& instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
4953 result
= get_buffer_size(ctx
, res_ptr
, true);
4957 if (instr
->op
== nir_texop_texture_samples
) {
4958 LLVMValueRef res
, samples
, is_msaa
;
4959 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res_ptr
, ctx
->ac
.v8i32
, "");
4960 samples
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
,
4961 LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4962 is_msaa
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4963 LLVMConstInt(ctx
->ac
.i32
, 28, false), "");
4964 is_msaa
= LLVMBuildAnd(ctx
->ac
.builder
, is_msaa
,
4965 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4966 is_msaa
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, is_msaa
,
4967 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4969 samples
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4970 LLVMConstInt(ctx
->ac
.i32
, 16, false), "");
4971 samples
= LLVMBuildAnd(ctx
->ac
.builder
, samples
,
4972 LLVMConstInt(ctx
->ac
.i32
, 0xf, false), "");
4973 samples
= LLVMBuildShl(ctx
->ac
.builder
, ctx
->ac
.i32_1
,
4975 samples
= LLVMBuildSelect(ctx
->ac
.builder
, is_msaa
, samples
,
4982 for (chan
= 0; chan
< instr
->coord_components
; chan
++)
4983 coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, coord
, chan
);
4985 if (offsets
&& instr
->op
!= nir_texop_txf
) {
4986 LLVMValueRef offset
[3], pack
;
4987 for (chan
= 0; chan
< 3; ++chan
)
4988 offset
[chan
] = ctx
->ac
.i32_0
;
4991 for (chan
= 0; chan
< ac_get_llvm_num_components(offsets
); chan
++) {
4992 offset
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, offsets
, chan
);
4993 offset
[chan
] = LLVMBuildAnd(ctx
->ac
.builder
, offset
[chan
],
4994 LLVMConstInt(ctx
->ac
.i32
, 0x3f, false), "");
4996 offset
[chan
] = LLVMBuildShl(ctx
->ac
.builder
, offset
[chan
],
4997 LLVMConstInt(ctx
->ac
.i32
, chan
* 8, false), "");
4999 pack
= LLVMBuildOr(ctx
->ac
.builder
, offset
[0], offset
[1], "");
5000 pack
= LLVMBuildOr(ctx
->ac
.builder
, pack
, offset
[2], "");
5001 address
[count
++] = pack
;
5004 /* pack LOD bias value */
5005 if (instr
->op
== nir_texop_txb
&& bias
) {
5006 address
[count
++] = bias
;
5009 /* Pack depth comparison value */
5010 if (instr
->is_shadow
&& comparator
) {
5011 LLVMValueRef z
= ac_to_float(&ctx
->ac
,
5012 ac_llvm_extract_elem(&ctx
->ac
, comparator
, 0));
5014 /* TC-compatible HTILE on radeonsi promotes Z16 and Z24 to Z32_FLOAT,
5015 * so the depth comparison value isn't clamped for Z16 and
5016 * Z24 anymore. Do it manually here.
5018 * It's unnecessary if the original texture format was
5019 * Z32_FLOAT, but we don't know that here.
5021 if (ctx
->ac
.chip_class
== VI
&& ctx
->abi
->clamp_shadow_reference
)
5022 z
= ac_build_clamp(&ctx
->ac
, z
);
5024 address
[count
++] = z
;
5027 /* pack derivatives */
5029 int num_src_deriv_channels
, num_dest_deriv_channels
;
5030 switch (instr
->sampler_dim
) {
5031 case GLSL_SAMPLER_DIM_3D
:
5032 case GLSL_SAMPLER_DIM_CUBE
:
5034 num_src_deriv_channels
= 3;
5035 num_dest_deriv_channels
= 3;
5037 case GLSL_SAMPLER_DIM_2D
:
5039 num_src_deriv_channels
= 2;
5040 num_dest_deriv_channels
= 2;
5043 case GLSL_SAMPLER_DIM_1D
:
5044 num_src_deriv_channels
= 1;
5045 if (ctx
->ac
.chip_class
>= GFX9
) {
5046 num_dest_deriv_channels
= 2;
5049 num_dest_deriv_channels
= 1;
5055 for (unsigned i
= 0; i
< num_src_deriv_channels
; i
++) {
5056 derivs
[i
] = ac_to_float(&ctx
->ac
, ac_llvm_extract_elem(&ctx
->ac
, ddx
, i
));
5057 derivs
[num_dest_deriv_channels
+ i
] = ac_to_float(&ctx
->ac
, ac_llvm_extract_elem(&ctx
->ac
, ddy
, i
));
5059 for (unsigned i
= num_src_deriv_channels
; i
< num_dest_deriv_channels
; i
++) {
5060 derivs
[i
] = ctx
->ac
.f32_0
;
5061 derivs
[num_dest_deriv_channels
+ i
] = ctx
->ac
.f32_0
;
5065 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&& coord
) {
5066 for (chan
= 0; chan
< instr
->coord_components
; chan
++)
5067 coords
[chan
] = ac_to_float(&ctx
->ac
, coords
[chan
]);
5068 if (instr
->coord_components
== 3)
5069 coords
[3] = LLVMGetUndef(ctx
->ac
.f32
);
5070 ac_prepare_cube_coords(&ctx
->ac
,
5071 instr
->op
== nir_texop_txd
, instr
->is_array
,
5072 instr
->op
== nir_texop_lod
, coords
, derivs
);
5078 for (unsigned i
= 0; i
< num_deriv_comp
* 2; i
++)
5079 address
[count
++] = derivs
[i
];
5082 /* Pack texture coordinates */
5084 address
[count
++] = coords
[0];
5085 if (instr
->coord_components
> 1) {
5086 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&& instr
->is_array
&& instr
->op
!= nir_texop_txf
) {
5087 coords
[1] = apply_round_slice(&ctx
->ac
, coords
[1]);
5089 address
[count
++] = coords
[1];
5091 if (instr
->coord_components
> 2) {
5092 if ((instr
->sampler_dim
== GLSL_SAMPLER_DIM_2D
||
5093 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
||
5094 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS
||
5095 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
) &&
5097 instr
->op
!= nir_texop_txf
&& instr
->op
!= nir_texop_txf_ms
) {
5098 coords
[2] = apply_round_slice(&ctx
->ac
, coords
[2]);
5100 address
[count
++] = coords
[2];
5103 if (ctx
->ac
.chip_class
>= GFX9
) {
5104 LLVMValueRef filler
;
5105 if (instr
->op
== nir_texop_txf
)
5106 filler
= ctx
->ac
.i32_0
;
5108 filler
= LLVMConstReal(ctx
->ac
.f32
, 0.5);
5110 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
) {
5111 /* No nir_texop_lod, because it does not take a slice
5112 * even with array textures. */
5113 if (instr
->is_array
&& instr
->op
!= nir_texop_lod
) {
5114 address
[count
] = address
[count
- 1];
5115 address
[count
- 1] = filler
;
5118 address
[count
++] = filler
;
5124 if (lod
&& ((instr
->op
== nir_texop_txl
|| instr
->op
== nir_texop_txf
) && !lod_is_zero
)) {
5125 address
[count
++] = lod
;
5126 } else if (instr
->op
== nir_texop_txf_ms
&& sample_index
) {
5127 address
[count
++] = sample_index
;
5128 } else if(instr
->op
== nir_texop_txs
) {
5131 address
[count
++] = lod
;
5133 address
[count
++] = ctx
->ac
.i32_0
;
5136 for (chan
= 0; chan
< count
; chan
++) {
5137 address
[chan
] = LLVMBuildBitCast(ctx
->ac
.builder
,
5138 address
[chan
], ctx
->ac
.i32
, "");
5141 if (instr
->op
== nir_texop_samples_identical
) {
5142 LLVMValueRef txf_address
[4];
5143 struct ac_image_args txf_args
= { 0 };
5144 unsigned txf_count
= count
;
5145 memcpy(txf_address
, address
, sizeof(txf_address
));
5147 if (!instr
->is_array
)
5148 txf_address
[2] = ctx
->ac
.i32_0
;
5149 txf_address
[3] = ctx
->ac
.i32_0
;
5151 set_tex_fetch_args(&ctx
->ac
, &txf_args
, instr
, nir_texop_txf
,
5153 txf_address
, txf_count
, 0xf);
5155 result
= build_tex_intrinsic(ctx
, instr
, false, &txf_args
);
5157 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
5158 result
= emit_int_cmp(&ctx
->ac
, LLVMIntEQ
, result
, ctx
->ac
.i32_0
);
5162 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
&&
5163 instr
->op
!= nir_texop_txs
) {
5164 unsigned sample_chan
= instr
->is_array
? 3 : 2;
5165 address
[sample_chan
] = adjust_sample_index_using_fmask(&ctx
->ac
,
5168 instr
->is_array
? address
[2] : NULL
,
5169 address
[sample_chan
],
5173 if (offsets
&& instr
->op
== nir_texop_txf
) {
5174 nir_const_value
*const_offset
=
5175 nir_src_as_const_value(instr
->src
[const_src
].src
);
5176 int num_offsets
= instr
->src
[const_src
].src
.ssa
->num_components
;
5177 assert(const_offset
);
5178 num_offsets
= MIN2(num_offsets
, instr
->coord_components
);
5179 if (num_offsets
> 2)
5180 address
[2] = LLVMBuildAdd(ctx
->ac
.builder
,
5181 address
[2], LLVMConstInt(ctx
->ac
.i32
, const_offset
->i32
[2], false), "");
5182 if (num_offsets
> 1)
5183 address
[1] = LLVMBuildAdd(ctx
->ac
.builder
,
5184 address
[1], LLVMConstInt(ctx
->ac
.i32
, const_offset
->i32
[1], false), "");
5185 address
[0] = LLVMBuildAdd(ctx
->ac
.builder
,
5186 address
[0], LLVMConstInt(ctx
->ac
.i32
, const_offset
->i32
[0], false), "");
5190 /* TODO TG4 support */
5191 if (instr
->op
== nir_texop_tg4
) {
5192 if (instr
->is_shadow
)
5195 dmask
= 1 << instr
->component
;
5197 set_tex_fetch_args(&ctx
->ac
, &args
, instr
, instr
->op
,
5198 res_ptr
, samp_ptr
, address
, count
, dmask
);
5200 result
= build_tex_intrinsic(ctx
, instr
, lod_is_zero
, &args
);
5202 if (instr
->op
== nir_texop_query_levels
)
5203 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
5204 else if (instr
->is_shadow
&& instr
->is_new_style_shadow
&&
5205 instr
->op
!= nir_texop_txs
&& instr
->op
!= nir_texop_lod
&&
5206 instr
->op
!= nir_texop_tg4
)
5207 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
5208 else if (instr
->op
== nir_texop_txs
&&
5209 instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&&
5211 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
5212 LLVMValueRef six
= LLVMConstInt(ctx
->ac
.i32
, 6, false);
5213 LLVMValueRef z
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
5214 z
= LLVMBuildSDiv(ctx
->ac
.builder
, z
, six
, "");
5215 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, z
, two
, "");
5216 } else if (ctx
->ac
.chip_class
>= GFX9
&&
5217 instr
->op
== nir_texop_txs
&&
5218 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
5220 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
5221 LLVMValueRef layers
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
5222 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, layers
,
5224 } else if (instr
->dest
.ssa
.num_components
!= 4)
5225 result
= ac_trim_vector(&ctx
->ac
, result
, instr
->dest
.ssa
.num_components
);
5229 assert(instr
->dest
.is_ssa
);
5230 result
= ac_to_integer(&ctx
->ac
, result
);
5231 _mesa_hash_table_insert(ctx
->defs
, &instr
->dest
.ssa
, result
);
5236 static void visit_phi(struct ac_nir_context
*ctx
, nir_phi_instr
*instr
)
5238 LLVMTypeRef type
= get_def_type(ctx
, &instr
->dest
.ssa
);
5239 LLVMValueRef result
= LLVMBuildPhi(ctx
->ac
.builder
, type
, "");
5241 _mesa_hash_table_insert(ctx
->defs
, &instr
->dest
.ssa
, result
);
5242 _mesa_hash_table_insert(ctx
->phis
, instr
, result
);
5245 static void visit_post_phi(struct ac_nir_context
*ctx
,
5246 nir_phi_instr
*instr
,
5247 LLVMValueRef llvm_phi
)
5249 nir_foreach_phi_src(src
, instr
) {
5250 LLVMBasicBlockRef block
= get_block(ctx
, src
->pred
);
5251 LLVMValueRef llvm_src
= get_src(ctx
, src
->src
);
5253 LLVMAddIncoming(llvm_phi
, &llvm_src
, &block
, 1);
5257 static void phi_post_pass(struct ac_nir_context
*ctx
)
5259 struct hash_entry
*entry
;
5260 hash_table_foreach(ctx
->phis
, entry
) {
5261 visit_post_phi(ctx
, (nir_phi_instr
*)entry
->key
,
5262 (LLVMValueRef
)entry
->data
);
5267 static void visit_ssa_undef(struct ac_nir_context
*ctx
,
5268 const nir_ssa_undef_instr
*instr
)
5270 unsigned num_components
= instr
->def
.num_components
;
5271 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, instr
->def
.bit_size
);
5274 if (num_components
== 1)
5275 undef
= LLVMGetUndef(type
);
5277 undef
= LLVMGetUndef(LLVMVectorType(type
, num_components
));
5279 _mesa_hash_table_insert(ctx
->defs
, &instr
->def
, undef
);
5282 static void visit_jump(struct ac_llvm_context
*ctx
,
5283 const nir_jump_instr
*instr
)
5285 switch (instr
->type
) {
5286 case nir_jump_break
:
5287 ac_build_break(ctx
);
5289 case nir_jump_continue
:
5290 ac_build_continue(ctx
);
5293 fprintf(stderr
, "Unknown NIR jump instr: ");
5294 nir_print_instr(&instr
->instr
, stderr
);
5295 fprintf(stderr
, "\n");
5300 static void visit_cf_list(struct ac_nir_context
*ctx
,
5301 struct exec_list
*list
);
5303 static void visit_block(struct ac_nir_context
*ctx
, nir_block
*block
)
5305 LLVMBasicBlockRef llvm_block
= LLVMGetInsertBlock(ctx
->ac
.builder
);
5306 nir_foreach_instr(instr
, block
)
5308 switch (instr
->type
) {
5309 case nir_instr_type_alu
:
5310 visit_alu(ctx
, nir_instr_as_alu(instr
));
5312 case nir_instr_type_load_const
:
5313 visit_load_const(ctx
, nir_instr_as_load_const(instr
));
5315 case nir_instr_type_intrinsic
:
5316 visit_intrinsic(ctx
, nir_instr_as_intrinsic(instr
));
5318 case nir_instr_type_tex
:
5319 visit_tex(ctx
, nir_instr_as_tex(instr
));
5321 case nir_instr_type_phi
:
5322 visit_phi(ctx
, nir_instr_as_phi(instr
));
5324 case nir_instr_type_ssa_undef
:
5325 visit_ssa_undef(ctx
, nir_instr_as_ssa_undef(instr
));
5327 case nir_instr_type_jump
:
5328 visit_jump(&ctx
->ac
, nir_instr_as_jump(instr
));
5331 fprintf(stderr
, "Unknown NIR instr type: ");
5332 nir_print_instr(instr
, stderr
);
5333 fprintf(stderr
, "\n");
5338 _mesa_hash_table_insert(ctx
->defs
, block
, llvm_block
);
5341 static void visit_if(struct ac_nir_context
*ctx
, nir_if
*if_stmt
)
5343 LLVMValueRef value
= get_src(ctx
, if_stmt
->condition
);
5345 nir_block
*then_block
=
5346 (nir_block
*) exec_list_get_head(&if_stmt
->then_list
);
5348 ac_build_uif(&ctx
->ac
, value
, then_block
->index
);
5350 visit_cf_list(ctx
, &if_stmt
->then_list
);
5352 if (!exec_list_is_empty(&if_stmt
->else_list
)) {
5353 nir_block
*else_block
=
5354 (nir_block
*) exec_list_get_head(&if_stmt
->else_list
);
5356 ac_build_else(&ctx
->ac
, else_block
->index
);
5357 visit_cf_list(ctx
, &if_stmt
->else_list
);
5360 ac_build_endif(&ctx
->ac
, then_block
->index
);
5363 static void visit_loop(struct ac_nir_context
*ctx
, nir_loop
*loop
)
5365 nir_block
*first_loop_block
=
5366 (nir_block
*) exec_list_get_head(&loop
->body
);
5368 ac_build_bgnloop(&ctx
->ac
, first_loop_block
->index
);
5370 visit_cf_list(ctx
, &loop
->body
);
5372 ac_build_endloop(&ctx
->ac
, first_loop_block
->index
);
5375 static void visit_cf_list(struct ac_nir_context
*ctx
,
5376 struct exec_list
*list
)
5378 foreach_list_typed(nir_cf_node
, node
, node
, list
)
5380 switch (node
->type
) {
5381 case nir_cf_node_block
:
5382 visit_block(ctx
, nir_cf_node_as_block(node
));
5385 case nir_cf_node_if
:
5386 visit_if(ctx
, nir_cf_node_as_if(node
));
5389 case nir_cf_node_loop
:
5390 visit_loop(ctx
, nir_cf_node_as_loop(node
));
5400 handle_vs_input_decl(struct radv_shader_context
*ctx
,
5401 struct nir_variable
*variable
)
5403 LLVMValueRef t_list_ptr
= ctx
->vertex_buffers
;
5404 LLVMValueRef t_offset
;
5405 LLVMValueRef t_list
;
5407 LLVMValueRef buffer_index
;
5408 int index
= variable
->data
.location
- VERT_ATTRIB_GENERIC0
;
5409 int idx
= variable
->data
.location
;
5410 unsigned attrib_count
= glsl_count_attribute_slots(variable
->type
, true);
5411 uint8_t input_usage_mask
=
5412 ctx
->shader_info
->info
.vs
.input_usage_mask
[variable
->data
.location
];
5413 unsigned num_channels
= util_last_bit(input_usage_mask
);
5415 variable
->data
.driver_location
= idx
* 4;
5417 for (unsigned i
= 0; i
< attrib_count
; ++i
, ++idx
) {
5418 if (ctx
->options
->key
.vs
.instance_rate_inputs
& (1u << (index
+ i
))) {
5419 buffer_index
= LLVMBuildAdd(ctx
->ac
.builder
, ctx
->abi
.instance_id
,
5420 ctx
->abi
.start_instance
, "");
5421 if (ctx
->options
->key
.vs
.as_ls
) {
5422 ctx
->shader_info
->vs
.vgpr_comp_cnt
=
5423 MAX2(2, ctx
->shader_info
->vs
.vgpr_comp_cnt
);
5425 ctx
->shader_info
->vs
.vgpr_comp_cnt
=
5426 MAX2(1, ctx
->shader_info
->vs
.vgpr_comp_cnt
);
5429 buffer_index
= LLVMBuildAdd(ctx
->ac
.builder
, ctx
->abi
.vertex_id
,
5430 ctx
->abi
.base_vertex
, "");
5431 t_offset
= LLVMConstInt(ctx
->ac
.i32
, index
+ i
, false);
5433 t_list
= ac_build_load_to_sgpr(&ctx
->ac
, t_list_ptr
, t_offset
);
5435 input
= ac_build_buffer_load_format(&ctx
->ac
, t_list
,
5438 num_channels
, false, true);
5440 input
= ac_build_expand_to_vec4(&ctx
->ac
, input
, num_channels
);
5442 for (unsigned chan
= 0; chan
< 4; chan
++) {
5443 LLVMValueRef llvm_chan
= LLVMConstInt(ctx
->ac
.i32
, chan
, false);
5444 ctx
->inputs
[radeon_llvm_reg_index_soa(idx
, chan
)] =
5445 ac_to_integer(&ctx
->ac
, LLVMBuildExtractElement(ctx
->ac
.builder
,
5446 input
, llvm_chan
, ""));
5451 static void interp_fs_input(struct radv_shader_context
*ctx
,
5453 LLVMValueRef interp_param
,
5454 LLVMValueRef prim_mask
,
5455 LLVMValueRef result
[4])
5457 LLVMValueRef attr_number
;
5460 bool interp
= interp_param
!= NULL
;
5462 attr_number
= LLVMConstInt(ctx
->ac
.i32
, attr
, false);
5464 /* fs.constant returns the param from the middle vertex, so it's not
5465 * really useful for flat shading. It's meant to be used for custom
5466 * interpolation (but the intrinsic can't fetch from the other two
5469 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
5470 * to do the right thing. The only reason we use fs.constant is that
5471 * fs.interp cannot be used on integers, because they can be equal
5475 interp_param
= LLVMBuildBitCast(ctx
->ac
.builder
, interp_param
,
5478 i
= LLVMBuildExtractElement(ctx
->ac
.builder
, interp_param
,
5480 j
= LLVMBuildExtractElement(ctx
->ac
.builder
, interp_param
,
5484 for (chan
= 0; chan
< 4; chan
++) {
5485 LLVMValueRef llvm_chan
= LLVMConstInt(ctx
->ac
.i32
, chan
, false);
5488 result
[chan
] = ac_build_fs_interp(&ctx
->ac
,
5493 result
[chan
] = ac_build_fs_interp_mov(&ctx
->ac
,
5494 LLVMConstInt(ctx
->ac
.i32
, 2, false),
5503 handle_fs_input_decl(struct radv_shader_context
*ctx
,
5504 struct nir_variable
*variable
)
5506 int idx
= variable
->data
.location
;
5507 unsigned attrib_count
= glsl_count_attribute_slots(variable
->type
, false);
5508 LLVMValueRef interp
;
5510 variable
->data
.driver_location
= idx
* 4;
5511 ctx
->input_mask
|= ((1ull << attrib_count
) - 1) << variable
->data
.location
;
5513 if (glsl_get_base_type(glsl_without_array(variable
->type
)) == GLSL_TYPE_FLOAT
) {
5514 unsigned interp_type
;
5515 if (variable
->data
.sample
)
5516 interp_type
= INTERP_SAMPLE
;
5517 else if (variable
->data
.centroid
)
5518 interp_type
= INTERP_CENTROID
;
5520 interp_type
= INTERP_CENTER
;
5522 interp
= lookup_interp_param(&ctx
->abi
, variable
->data
.interpolation
, interp_type
);
5526 for (unsigned i
= 0; i
< attrib_count
; ++i
)
5527 ctx
->inputs
[radeon_llvm_reg_index_soa(idx
+ i
, 0)] = interp
;
5532 handle_vs_inputs(struct radv_shader_context
*ctx
,
5533 struct nir_shader
*nir
) {
5534 nir_foreach_variable(variable
, &nir
->inputs
)
5535 handle_vs_input_decl(ctx
, variable
);
5539 prepare_interp_optimize(struct radv_shader_context
*ctx
,
5540 struct nir_shader
*nir
)
5542 if (!ctx
->options
->key
.fs
.multisample
)
5545 bool uses_center
= false;
5546 bool uses_centroid
= false;
5547 nir_foreach_variable(variable
, &nir
->inputs
) {
5548 if (glsl_get_base_type(glsl_without_array(variable
->type
)) != GLSL_TYPE_FLOAT
||
5549 variable
->data
.sample
)
5552 if (variable
->data
.centroid
)
5553 uses_centroid
= true;
5558 if (uses_center
&& uses_centroid
) {
5559 LLVMValueRef sel
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntSLT
, ctx
->abi
.prim_mask
, ctx
->ac
.i32_0
, "");
5560 ctx
->persp_centroid
= LLVMBuildSelect(ctx
->ac
.builder
, sel
, ctx
->persp_center
, ctx
->persp_centroid
, "");
5561 ctx
->linear_centroid
= LLVMBuildSelect(ctx
->ac
.builder
, sel
, ctx
->linear_center
, ctx
->linear_centroid
, "");
static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index)
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs + radeon_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
					inputs);

			if (!interp_param)
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			++index;
		} else if (i == VARYING_SLOT_POS) {
			for (int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}

static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = shader->info.clip_distance_array_size +
				     shader->info.cull_distance_array_size;
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
			}

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
			mask_attribs = 1ull << idx;
		}
	}

	ctx->output_mask |= mask_attribs;
}

static void
handle_shader_output_decl(struct ac_nir_context *ctx,
			  struct nir_shader *nir,
			  struct nir_variable *variable)
{
	unsigned output_loc = variable->data.driver_location / 4;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	/* tess ctrl has its own load/store paths for outputs */
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		return;

	if (ctx->stage == MESA_SHADER_VERTEX ||
	    ctx->stage == MESA_SHADER_TESS_EVAL ||
	    ctx->stage == MESA_SHADER_GEOMETRY) {
		int idx = variable->data.location + variable->data.index;
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = nir->info.clip_distance_array_size +
				     nir->info.cull_distance_array_size;

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
		}
	}

	for (unsigned i = 0; i < attrib_count; ++i) {
		for (unsigned chan = 0; chan < 4; chan++) {
			ctx->abi->outputs[radeon_llvm_reg_index_soa(output_loc + i, chan)] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}
	}
}

static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
		       enum glsl_base_type type)
{
	switch (type) {
	case GLSL_TYPE_INT:
	case GLSL_TYPE_UINT:
	case GLSL_TYPE_BOOL:
	case GLSL_TYPE_SUBROUTINE:
		return ac->i32;
	case GLSL_TYPE_FLOAT: /* TODO handle mediump */
		return ac->f32;
	case GLSL_TYPE_INT64:
	case GLSL_TYPE_UINT64:
		return ac->i64;
	case GLSL_TYPE_DOUBLE:
		return ac->f64;
	default:
		unreachable("unknown GLSL type");
	}
}

static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
		  const struct glsl_type *type)
{
	if (glsl_type_is_scalar(type)) {
		return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
	}

	if (glsl_type_is_vector(type)) {
		return LLVMVectorType(
			glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
			glsl_get_vector_elements(type));
	}

	if (glsl_type_is_matrix(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_column_type(type)),
			glsl_get_matrix_columns(type));
	}

	if (glsl_type_is_array(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_array_element(type)),
			glsl_get_length(type));
	}

	assert(glsl_type_is_struct(type));

	LLVMTypeRef member_types[glsl_get_length(type)];

	for (unsigned i = 0; i < glsl_get_length(type); i++) {
		member_types[i] =
			glsl_to_llvm_type(ac,
					  glsl_get_struct_field(type, i));
	}

	return LLVMStructTypeInContext(ac->context, member_types,
				       glsl_get_length(type), false);
}

static void
setup_locals(struct ac_nir_context *ctx,
	     struct nir_function *func)
{
	int i, j;
	ctx->num_locals = 0;
	nir_foreach_variable(variable, &func->impl->locals) {
		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
		variable->data.driver_location = ctx->num_locals * 4;
		variable->data.location_frac = 0;
		ctx->num_locals += attrib_count;
	}
	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));

	for (i = 0; i < ctx->num_locals; i++) {
		for (j = 0; j < 4; j++) {
			ctx->locals[i * 4 + j] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
		}
	}
}

static void
setup_shared(struct ac_nir_context *ctx,
	     struct nir_shader *nir)
{
	nir_foreach_variable(variable, &nir->shared) {
		LLVMValueRef shared =
			LLVMAddGlobalInAddressSpace(
				ctx->ac.module, glsl_to_llvm_type(&ctx->ac, variable->type),
				variable->name ? variable->name : "",
				AC_LOCAL_ADDR_SPACE);
		_mesa_hash_table_insert(ctx->vars, variable, shared);
	}
}

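/* For fragment-shader MRT exports below, the color format taken from the
 * pipeline key decides how the four 32-bit channels are written: either
 * as-is, restricted to a subset (R, GR, AR), or packed in pairs into
 * 16-bit values with the COMPR flag set (FP16/UNORM16/SNORM16/UINT16/SINT16
 * ABGR formats).
 */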
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			args->enabled_channels = 0x9;
			args->out[0] = values[0];
			args->out[3] = values[3];
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (unsigned chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16/u16. */
		if (packi) {
			for (unsigned chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i) {
		if (!(args->enabled_channels & (1 << i)))
			continue;

		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
	}
}

static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[radeon_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}

static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       struct ac_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned target;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef *tmp_out = &ctx->abi.outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for (unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
		LLVMValueRef slots[8];
		unsigned j;

		if (outinfo->cull_dist_mask)
			outinfo->cull_dist_mask <<= ctx->num_output_clips;

		i = VARYING_SLOT_CLIP_DIST0;
		for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
			slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
			slots[i] = LLVMGetUndef(ctx->ac.f32);

		if (ctx->num_output_clips + ctx->num_output_culls > 4) {
			target = V_008DFC_SQ_EXP_POS + 3;
			si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		}

		target = V_008DFC_SQ_EXP_POS + 2;
		si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
		memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
		       &args, sizeof(args));
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;
		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i < VARYING_SLOT_VAR0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			/* Enable all channels for the GS copy shader because
			 * we don't know the output usage mask currently.
			 */
			output_usage_mask = 0xf;
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
							  ctx->shader_info->vs.vgpr_comp_cnt);
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0xf);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}

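/* ES outputs either go through LDS (GFX9 merged ES/GS, addressed by a
 * per-vertex slot derived from the wave index in merged_wave_info) or are
 * written to the ESGS ring buffer at es2gs_offset on older chips.
 */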
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct ac_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = ctx->num_output_clips + ctx->num_output_culls;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index + (length > 4), max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
						     LLVMConstInt(ctx->ac.i32, 24, false),
						     LLVMConstInt(ctx->ac.i32, 4, false), false);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = ctx->num_output_clips + ctx->num_output_culls;

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}
		for (j = 0; j < length; j++) {
			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				ac_lds_store(&ctx->ac, dw_addr,
					     LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
				dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    1, 1, true, true);
			}
		}
	}
}

static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	LLVMValueRef vertex_dw_stride = unpack_param(&ctx->ac, ctx->ls_out_layout, 13, 8);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = ctx->num_output_clips + ctx->num_output_culls;
		int param = shader_io_get_unique_index(i);
		mark_tess_output(ctx, false, param);
		if (length > 4)
			mark_tess_output(ctx, false, param + 1);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < length; j++) {
			ac_lds_store(&ctx->ac, dw_addr,
				     LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}

struct ac_build_if_state
{
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};

static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}

static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}

/**
 * End a conditional.
 */
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}

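/* Tess factor writes are performed by invocation 0 of each patch only: the
 * outer/inner levels are read back from LDS, gathered into vectors and
 * stored to the tess factor ring (preceded, on SI-VI, by the dynamic HS
 * control word for the first patch), and optionally mirrored to the offchip
 * ring when the TES reads them.
 */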
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
	mark_tess_output(ctx, true, tess_inner_index);
	lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	mark_tess_output(ctx, true, tess_outer_index);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps + i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= VI) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, 1, 0, true, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, 1, 0, true, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, 1, 0, true, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);

		param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_inner, 0));

		inner_vec = inner_comps == 1 ? inner[0] :
			ac_build_gather_values(&ctx->ac, inner, inner_comps);
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
					    inner_comps, tf_inner_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);
	}
	ac_nir_build_endif(&if_ctx);
}

static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}

static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}

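/* The GS epilogue only signals that this wave is done emitting; the vertex
 * data itself has already been written to the GSVS ring by the emit-vertex
 * callback.
 */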
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}

static void ac_llvm_finalize_module(struct radv_shader_context *ctx)
{
	LLVMPassManagerRef passmgr;
	/* Create the pass manager */
	passmgr = LLVMCreateFunctionPassManagerForModule(
							ctx->ac.module);

	/* This pass should eliminate all the load and store instructions */
	LLVMAddPromoteMemoryToRegisterPass(passmgr);

	/* Add some optimization passes */
	LLVMAddScalarReplAggregatesPass(passmgr);
	LLVMAddLICMPass(passmgr);
	LLVMAddAggressiveDCEPass(passmgr);
	LLVMAddCFGSimplificationPass(passmgr);
	LLVMAddInstructionCombiningPass(passmgr);

	/* Run the passes */
	LLVMInitializeFunctionPassManager(passmgr);
	LLVMRunFunctionPassManager(passmgr, ctx->main_function);
	LLVMFinalizeFunctionPassManager(passmgr);

	LLVMDisposeBuilder(ctx->ac.builder);
	LLVMDisposePassManager(passmgr);

	ac_llvm_context_dispose(&ctx->ac);
}

static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct ac_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls ||
		    ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}

static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
	    (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
	}
	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		LLVMValueRef tmp;
		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));

		ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");

		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
		tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
		tmp = LLVMBuildOr(ctx->ac.builder, tmp, ctx->gsvs_ring_stride, "");
		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}

static unsigned
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
			      const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= CIK ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
					  LLVMConstInt(ctx->ac.i32, 8, false),
					  LLVMConstInt(ctx->ac.i32, 8, false), false);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->vs_prim_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.vertex_id, ctx->vs_prim_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}

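/* On the merged GS path the six per-vertex ESGS offsets arrive packed as
 * 16-bit pairs, so they are unpacked with BFE before use, and the GS wave id
 * is pulled out of merged_wave_info.
 */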
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
						     LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
						     LLVMConstInt(ctx->ac.i32, 16, false), false);
	}

	ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
				       LLVMConstInt(ctx->ac.i32, 16, false),
				       LLVMConstInt(ctx->ac.i32, 8, false), false);
}

void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
		      struct nir_shader *nir)
{
	struct ac_nir_context ctx = {};
	struct nir_function *func;

	/* Last minute passes for both radv & radeonsi */
	ac_lower_subgroups(nir);

	ctx.ac = *ac;
	ctx.abi = abi;

	ctx.stage = nir->info.stage;

	ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

	nir_foreach_variable(variable, &nir->outputs)
		handle_shader_output_decl(&ctx, nir, variable);

	ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);

	func = (struct nir_function *)exec_list_get_head(&nir->functions);

	setup_locals(&ctx, func);

	if (nir->info.stage == MESA_SHADER_COMPUTE)
		setup_shared(&ctx, nir);

	visit_cf_list(&ctx, &func->impl->body);
	phi_post_pass(&ctx);

	if (nir->info.stage != MESA_SHADER_COMPUTE)
		ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
				      ctx.abi->outputs);

	free(ctx.locals);
	ralloc_free(ctx.defs);
	ralloc_free(ctx.phis);
	ralloc_free(ctx.vars);
}

static
LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
				       struct nir_shader *const *shaders,
				       int shader_count,
				       struct ac_shader_variant_info *shader_info,
				       const struct ac_nir_compiler_options *options,
				       bool dump_shader)
{
	struct radv_shader_context ctx = {0};
	int i;

	ctx.options = options;
	ctx.shader_info = shader_info;
	ctx.context = LLVMContextCreate();

	ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
			     options->family);
	ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
	LLVMSetTarget(ctx.ac.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");

	LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
	char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
	LLVMSetDataLayout(ctx.ac.module, data_layout_str);
	LLVMDisposeTargetData(data_layout);
	LLVMDisposeMessage(data_layout_str);

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);

	memset(shader_info, 0, sizeof(*shader_info));

	for(int i = 0; i < shader_count; ++i)
		ac_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < AC_UD_MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      ac_nir_get_max_workgroup_size(ctx.options->chip_class,
									    shaders[i]));
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;

	if (shader_count >= 2)
		ac_init_exec_full_mask(&ctx.ac);

	if (ctx.ac.chip_class == GFX9 &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;
		ctx.tess_outputs_written = 0;
		ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
		ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			if (shader_info->info.vs.needs_instance_id) {
				if (ctx.options->key.vs.as_ls) {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
				}
			}
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		emit_barrier(&ctx.ac, ctx.stage);

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
							  LLVMConstInt(ctx.ac.i32, 8 * i, false),
							  LLVMConstInt(ctx.ac.i32, 8, false), false);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					shaders[i]->info.cull_distance_array_size > 4;
			shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
				shaders[i]->info.gs.vertices_out;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.outputs_written = ctx.tess_outputs_written;
			shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
			shader_info->vs.outputs_written = ctx.tess_outputs_written;
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	ctx.shader_info->private_mem_vgprs =
		ac_count_scratch_private_memory(ctx.main_function);

	return ctx.ac.module;
}

static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				LLVMTargetMachineRef tm)
{
	unsigned retval = 0;
	char *err;
	LLVMContextRef llvm_ctx;
	LLVMMemoryBufferRef out_buffer;
	unsigned buffer_size;
	const char *buffer_data;
	LLVMBool mem_err;

	/* Setup Diagnostic Handler*/
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR*/
	mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile,
						      &err, &out_buffer);

	/* Process Errors/Warnings */
	if (mem_err) {
		fprintf(stderr, "%s: %s", __FUNCTION__, err);
		free(err);
		return 1;
	}

	/* Extract Shader Code*/
	buffer_size = LLVMGetBufferSize(out_buffer);
	buffer_data = LLVMGetBufferStart(out_buffer);

	ac_elf_read(buffer_data, buffer_size, binary);

	/* Clean up */
	LLVMDisposeMemoryBuffer(out_buffer);

	return retval;
}

static void ac_compile_llvm_module(LLVMTargetMachineRef tm,
				   LLVMModuleRef llvm_module,
				   struct ac_shader_binary *binary,
				   struct ac_shader_config *config,
				   struct ac_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   bool dump_shader, bool supports_spill)
{
	if (dump_shader)
		ac_dump_module(llvm_module);

	memset(binary, 0, sizeof(*binary));
	int v = ac_llvm_compile(llvm_module, binary, tm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	if (dump_shader)
		fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);

	ac_shader_binary_read_config(binary, config, 0, supports_spill);

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	if (stage == MESA_SHADER_FRAGMENT) {
		shader_info->num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
	}
	config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);

	/* +3 for scratch wave offset and VCC */
	config->num_sgprs = MAX2(config->num_sgprs,
				 shader_info->num_input_sgprs + 3);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	config->float_mode |= V_00B028_FP_64_DENORMS;
}

static void
ac_fill_shader_info(struct ac_shader_variant_info *shader_info, struct nir_shader *nir, const struct ac_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}

void ac_compile_nir_shader(LLVMTargetMachineRef tm,
			   struct ac_shader_binary *binary,
			   struct ac_shader_config *config,
			   struct ac_shader_variant_info *shader_info,
			   struct nir_shader *const *nir,
			   int nir_count,
			   const struct ac_nir_compiler_options *options,
			   bool dump_shader)
{
	LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
							     options, dump_shader);

	ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}

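/* The GS copy shader runs as a VS: it loads each output component back from
 * the GSVS ring (strided by gs_max_out_vertices) and then performs the
 * regular VS position/parameter exports.
 */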
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	int idx = 0;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int length = 4;
		int slot = idx;
		int slot_inc = 1;
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* unpack clip and cull from a single set of slots */
			length = ctx->num_output_clips + ctx->num_output_culls;
			if (length > 4)
				slot_inc = 2;
		}

		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef value, soffset;

			soffset = LLVMConstInt(ctx->ac.i32,
					       (slot * 4 + j) *
					       ctx->gs_max_out_vertices * 16 * 4, false);

			value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
						     1, ctx->ac.i32_0,
						     vtx_offset, soffset,
						     0, 1, 1, true, false);

			LLVMBuildStore(ctx->ac.builder,
				       ac_to_float(&ctx->ac, value), ctx->abi.outputs[radeon_llvm_reg_index_soa(i, j)]);
		}
		idx += slot_inc;
	}
	handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
}

void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
			      struct nir_shader *geom_shader,
			      struct ac_shader_binary *binary,
			      struct ac_shader_config *config,
			      struct ac_shader_variant_info *shader_info,
			      const struct ac_nir_compiler_options *options,
			      bool dump_shader)
{
	struct radv_shader_context ctx = {0};
	ctx.context = LLVMContextCreate();
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
			     options->family);
	ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);

	ctx.is_gs_copy_shader = true;
	LLVMSetTarget(ctx.ac.module, "amdgcn--");

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
	ctx.num_output_culls = geom_shader->info.cull_distance_array_size;

	struct ac_nir_context nir_ctx = {};
	nir_ctx.ac = ctx.ac;
	nir_ctx.abi = &ctx.abi;

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		handle_shader_output_decl(&nir_ctx, geom_shader, variable);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx);

	ac_compile_llvm_module(tm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX,
			       dump_shader, options->supports_spill);
}

static void
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	bool llvm_has_working_vgpr_indexing = chip_class <= VI;

	/* TODO: Indirect indexing of GS inputs is unimplemented.
	 *
	 * TCS and TES load inputs directly from LDS or offchip memory, so
	 * indirect indexing is trivial.
	 */
	nir_variable_mode indirect_mask = 0;
	if (nir->info.stage == MESA_SHADER_GEOMETRY ||
	    (nir->info.stage != MESA_SHADER_TESS_CTRL &&
	     nir->info.stage != MESA_SHADER_TESS_EVAL &&
	     !llvm_has_working_vgpr_indexing)) {
		indirect_mask |= nir_var_shader_in;
	}
	if (!llvm_has_working_vgpr_indexing &&
	    nir->info.stage != MESA_SHADER_TESS_CTRL)
		indirect_mask |= nir_var_shader_out;

	/* TODO: We shouldn't need to do this, however LLVM isn't currently
	 * smart enough to handle indirects without causing excess spilling
	 * causing the gpu to hang.
	 *
	 * See the following thread for more details of the problem:
	 * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
	 */
	indirect_mask |= nir_var_local;

	nir_lower_indirect_derefs(nir, indirect_mask);
}