/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "radv_private.h"
29 #include "radv_shader.h"
30 #include "radv_shader_helper.h"
33 #include <llvm-c/Core.h>
34 #include <llvm-c/TargetMachine.h>
35 #include <llvm-c/Transforms/Scalar.h>
36 #include <llvm-c/Transforms/Utils.h>
40 #include "ac_binary.h"
41 #include "ac_llvm_util.h"
42 #include "ac_llvm_build.h"
43 #include "ac_shader_abi.h"
44 #include "ac_shader_util.h"
45 #include "ac_exp_param.h"
47 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
struct ac_build_if_state {
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
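/* A worked example of the clamping above (illustrative numbers, not taken
 * from any particular app): with triangle patches (3 input and 3 output
 * control points), 4 input attributes and 4 output attributes plus one
 * per-patch attribute, input_patch_size = 3*4*16 = 192 bytes and
 * output_patch_size = 3*4*16 + 16 = 208 bytes. The one-wave-per-SIMD rule
 * gives 64/3*4 = 84 patches, the CIK+ LDS limit gives 65536/(192+208) = 163,
 * and (assuming the off-chip limit does not bind) the final cap reduces the
 * result to 40 patches.
 */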
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
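/* Note on units: the helpers below compute sizes in bytes (every attribute
 * slot is 16 bytes, i.e. 4 dwords) and then divide by 4, because LDS is
 * addressed in dwords by ac_lds_load()/ac_lds_store().
 */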
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}
	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs, bool indirect)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];

	set_loc(ud_info, sgpr_idx, num_sgprs, false);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     bool indirect)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];

	set_loc(ud_info, sgpr_idx, 1, indirect);

	if (!indirect)
		locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
	}
}
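/* A worked example of the budget above (illustrative numbers): a GFX8 vertex
 * shader has 16 user SGPRs available; 2 go to scratch/rings, 1 to push
 * constants, 1 to the vertex-buffer pointer and 3 to base vertex/start
 * instance/draw id, leaving 9. A pipeline layout using more descriptor sets
 * than that is forced down the indirect path, where all sets are reached
 * through a single pointer argument.
 */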
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}

	if (ctx->shader_info->info.so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	/* Streamout SGPRs. */
	if (ctx->shader_info->info.so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->info.so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, false);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));
			} else
				ctx->descriptor_sets[i] = NULL;
		}
		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);
		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);
			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
			      &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = ac_build_imad(&ctx->ac, index, stride,
			       LLVMConstInt(ctx->ac.i32, base_offset, false));

	desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
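/* Concretely, the byte address computed by get_tcs_tes_buffer_address()
 * below is (with every attribute taking 16 bytes):
 *
 *   per-vertex: 16 * (rel_patch_id * vertices_per_patch + vertex_index
 *                     + param_index * vertices_per_patch * num_patches)
 *   per-patch:  16 * (rel_patch_id + param_index * num_patches)
 *               + size of the whole per-vertex block
 *
 * which matches the attribute-major layout described above.
 */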
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if (location == VARYING_SLOT_CLIP_DIST0 &&
	    is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
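/* On GFX9 the ES outputs sit in LDS (gs_vtx_offset holds a dword address),
 * while older chips read them back from the ESGS ring buffer; the 256-byte
 * soffset step per dword slot presumably reflects the ring's per-lane
 * swizzle (64 lanes * 4 bytes).
 */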
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	case 16:
		sample_pos_offset = 15;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
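/* These offsets appear to follow from the ring layout: the positions for
 * 1x, 2x, 4x, 8x and 16x are stored consecutively, so the start of the
 * N-sample group is the number of positions preceding it (1, 1+2=3,
 * 1+2+4=7, 1+2+4+8=15).
 */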
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->info.ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}
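/* For example, at 4x MSAA with full per-sample shading
 * (log2_ps_iter_samples == 2) the mask is 0x1111; shifting it left by
 * sample_id selects this sample's bit in each group of four, and ANDing
 * with sample_coverage leaves only the samples this invocation covers.
 */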
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    1, 1, true, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
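/* The GSVS ring is laid out attribute-major: each output dword slot is
 * gs_max_out_vertices entries wide, so the store above lands at byte
 * offset 4 * (offset * gs_max_out_vertices + gs_next_vertex), relative to
 * this wave's gs2vs_offset.
 */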
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}
1852 static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi
*abi
)
1854 return abi
->base_vertex
;
1857 static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi
*abi
,
1858 LLVMValueRef buffer_ptr
, bool write
)
1860 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1861 LLVMValueRef result
;
1863 LLVMSetMetadata(buffer_ptr
, ctx
->ac
.uniform_md_kind
, ctx
->ac
.empty_md
);
1865 result
= LLVMBuildLoad(ctx
->ac
.builder
, buffer_ptr
, "");
1866 LLVMSetMetadata(result
, ctx
->ac
.invariant_load_md_kind
, ctx
->ac
.empty_md
);
static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 64;
		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return alpha;
}
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}

				if (ctx->options->key.vs.as_ls) {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");
		t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);

		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_buffer_load_format(&ctx->ac, t_list,
						    buffer_index,
						    ctx->ac.i32_0,
						    num_channels, false, true);

		input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}
static void interp_fs_input(struct radv_shader_context *ctx,
			    unsigned attr,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef result[4])
{
	LLVMValueRef attr_number;
	unsigned chan;
	LLVMValueRef i, j;
	bool interp = !LLVMIsUndef(interp_param);

	attr_number = LLVMConstInt(ctx->ac.i32, attr, false);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	if (interp) {
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						ctx->ac.v2f32, "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_1, "");
	}

	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);

		if (interp) {
			result[chan] = ac_build_fs_interp(&ctx->ac,
							  llvm_chan, attr_number,
							  prim_mask, i, j);
		} else {
			result[chan] = ac_build_fs_interp_mov(&ctx->ac,
							      LLVMConstInt(ctx->ac.i32, 2, false),
							      llvm_chan, attr_number,
							      prim_mask);
			result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
			result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], LLVMTypeOf(interp_param), "");
		}
	}
}
static void
handle_fs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	int idx = variable->data.location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	LLVMValueRef interp = NULL;
	uint64_t mask;

	variable->data.driver_location = idx * 4;
	mask = ((1ull << attrib_count) - 1) << variable->data.location;

	if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
		unsigned interp_type;
		if (variable->data.sample)
			interp_type = INTERP_SAMPLE;
		else if (variable->data.centroid)
			interp_type = INTERP_CENTROID;
		else
			interp_type = INTERP_CENTER;

		interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
	}
	bool is_16bit = glsl_type_is_16bit(variable->type);
	LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
	if (interp == NULL)
		interp = LLVMGetUndef(type);

	for (unsigned i = 0; i < attrib_count; ++i)
		ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;

	if (idx == VARYING_SLOT_CLIP_DIST0) {
		/* Do not account for the number of components inside the array
		 * of clip/cull distances because this might wrongly set other
		 * bits like primitive ID or layer.
		 */
		mask = 1ull << VARYING_SLOT_CLIP_DIST0;
	}

	ctx->input_mask |= mask;
}
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}
static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}
static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index) {
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
		ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
	}

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs +ac_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
					inputs);

			if (LLVMIsUndef(interp_param))
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			if (i >= VARYING_SLOT_VAR0)
				ctx->abi.fs_input_attr_indices[i - VARYING_SLOT_VAR0] = index;
			++index;
		} else if (i == VARYING_SLOT_CLIP_DIST0) {
			int length = ctx->shader_info->info.ps.num_input_clips_culls;

			for (unsigned j = 0; j < length; j += 4) {
				inputs = ctx->inputs + ac_llvm_reg_index_soa(i, j);

				interp_param = *inputs;
				interp_fs_input(ctx, index, interp_param,
						ctx->abi.prim_mask, inputs);
				++index;
			}
		} else if (i == VARYING_SLOT_POS) {
			for(int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}

			mask_attribs = 1ull << idx;
		}
	}

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			args->enabled_channels = 0x9;
			args->out[0] = values[0];
			args->out[3] = values[3];
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     values[chan],
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     values[chan],
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16 or u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i) {
		if (!(args->enabled_channels & (1 << i)))
			continue;

		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
	}
}
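/* Note (illustrative, not from the original source): when packf/packi is set,
 * the four 32-bit channels are compressed pairwise, (values[0], values[1])
 * into out[0] and (values[2], values[3]) into out[1], each dword holding two
 * 16-bit values. enabled_channels = 0x5 is the compressed-export writemask:
 * bit 0 enables the XY pair and bit 2 the ZW pair.
 */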
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}
static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned loc = output->location;
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac,
				       radv_load_output(ctx, loc, start + i));
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       util_next_power_of_two(num_comps));
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    1, 1, true, false);
}
static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	struct ac_build_if_state if_ctx;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_nir_build_if(&if_ctx, ctx, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->info.so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
			struct radv_stream_output *output =
				&ctx->shader_info->info.so.outputs[i];

			if (stream != output->stream)
				continue;

			radv_emit_stream_output(ctx, so_buffers,
						so_write_offset, output);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
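/* Recap of the formula above with illustrative numbers (not from the original
 * source): with stride[buf] = 4 dwords (16 bytes), streamout_offset[buf] = 8
 * and an attribute at byte offset 12, lane t writes at
 * 8*4 + (streamout_write_index + t)*16 + 12. ac_build_imad computes the
 * (write_index + tid)*stride + so_offset part; the per-attribute offset is
 * applied by the buffer store itself.
 */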
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id, bool export_layer_id,
		       struct radv_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned target;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef *tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for (unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
		unsigned output_usage_mask, length;
		LLVMValueRef slots[8];
		unsigned j;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
		}

		length = util_last_bit(output_usage_mask);

		i = VARYING_SLOT_CLIP_DIST0;
		for (j = 0; j < length; j++)
			slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
		for (i = length; i < 8; i++)
			slots[i] = LLVMGetUndef(ctx->ac.f32);

		if (length > 4) {
			target = V_008DFC_SQ_EXP_POS + 3;
			si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		}

		target = V_008DFC_SQ_EXP_POS + 2;
		si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
		memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
		       &args, sizeof(args));

		/* Export the clip/cull distances values to the next stage. */
		radv_export_param(ctx, param_count, &slots[0], 0xf);
		outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0] = param_count++;
		if (length > 4) {
			radv_export_param(ctx, param_count, &slots[4], 0xf);
			outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1] = param_count++;
		}
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (ctx->shader_info->info.so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;
		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i < VARYING_SLOT_VAR0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
							  ctx->shader_info->vs.vgpr_comp_cnt);
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	if (export_layer_id && layer_value) {
		LLVMValueRef values[4];

		values[0] = layer_value;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask;
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = util_last_bit(output_usage_mask);

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index + (length > 4), max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = util_last_bit(output_usage_mask);

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    1, 1, true, true);
			}
		}
	}
}
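/* Illustration (derived from the code above, not an original comment): on
 * GFX9 the ES outputs go to LDS. Each vertex owns itemsize_dw dwords;
 * vertex_idx combines the lane id with the wave index (wave * 64 + lane), so
 * component j of the param with unique index p is stored at dword
 * vertex_idx * itemsize_dw + p * 4 + j. Pre-GFX9 the same data goes through
 * the ESGS ring buffer at byte offset (4 * param_index + j) * 4 from
 * es2gs_offset.
 */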
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.vs.output_usage_mask[i];
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = util_last_bit(output_usage_mask);

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;
	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	// LINES reversal
	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= VI) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, 1, 0, true, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, 1, 0, true, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, 1, 0, true, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ctx->oc_lds, 0, 1, 0, true, false);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
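/* Layout recap (illustrative, not an original comment): the tess-factor ring
 * holds `stride` dwords per patch: isolines 2 (outer[0..1]), triangles 4
 * (outer[0..2] + inner[0]), quads 6 (outer[0..3] + inner[0..1]).
 * byteoffset = rel_patch_id * 4 * stride selects the patch; on <= VI the
 * first patch additionally writes the 0x80000000 control word, which shifts
 * the factor data by tf_offset = 4 bytes.
 */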
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}
static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}
static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
					       ctx->options->key.vs.export_layer_id,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
					       ctx->options->key.tes.export_layer_id,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls ||
		    ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= VI &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = 64;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->info.gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->gs_max_out_vertices;

			/* Limit on the stride field for <= CIK. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * 64;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
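/* Note (illustrative, not an original comment): the GSVS override rewrites
 * the buffer descriptor (V#) in place. The low 64 bits holding the base
 * address are advanced by stream_offset via the v2i64 bitcast, the STRIDE
 * field in dword 1 is set with S_008F04_STRIDE(), and NUM_RECORDS in dword 2
 * is set to 64 so the swizzled layout wraps every 64 threads.
 */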
unsigned
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
			      const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= CIK ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}
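/* Example (illustrative): a compute shader with local_size = 8x8x1 reports
 * 8 * 8 * 1 = 64 threads here; TCS on CIK+ and GS on GFX9+ report 128, and
 * the remaining stages return 0.
 */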
/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
				       struct nir_shader *const *shaders,
				       int shader_count,
				       struct radv_shader_variant_info *shader_info,
				       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);

	memset(shader_info, 0, sizeof(*shader_info));

	for(int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < RADV_UD_MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      ac_nir_get_max_workgroup_size(ctx.options->chip_class,
									    shaders[i]));
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;

	if (shader_count >= 2)
		ac_init_exec_full_mask(&ctx.ac);

	if (ctx.ac.chip_class == GFX9 &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			if (shader_info->info.vs.needs_instance_id) {
				if (ctx.options->key.vs.as_ls) {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
				}
			}
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (i)
			ac_emit_barrier(&ctx.ac, ctx.stage);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
				shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
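/* Control-flow note (illustrative, not an original comment): for merged
 * shaders (shader_count >= 2) each stage body above is wrapped in
 *     if (thread_id < count)   // count unpacked from merged_wave_info
 * so that, e.g., the LS half and the HS half of a GFX9 LS+HS wave only
 * execute for the lanes that actually carry work for that stage.
 */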
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}
static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler*/
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR*/
	if (!radv_compile_to_binary(ac_llvm, M, binary))
		retval = 1;
	return retval;
}
*ac_llvm
,
3668 LLVMModuleRef llvm_module
,
3669 struct ac_shader_binary
*binary
,
3670 struct ac_shader_config
*config
,
3671 struct radv_shader_variant_info
*shader_info
,
3672 gl_shader_stage stage
,
3673 const struct radv_nir_compiler_options
*options
)
3675 if (options
->dump_shader
)
3676 ac_dump_module(llvm_module
);
3678 memset(binary
, 0, sizeof(*binary
));
3680 if (options
->record_llvm_ir
) {
3681 char *llvm_ir
= LLVMPrintModuleToString(llvm_module
);
3682 binary
->llvm_ir_string
= strdup(llvm_ir
);
3683 LLVMDisposeMessage(llvm_ir
);
3686 int v
= ac_llvm_compile(llvm_module
, binary
, ac_llvm
);
3688 fprintf(stderr
, "compile failed\n");
3691 if (options
->dump_shader
)
3692 fprintf(stderr
, "disasm:\n%s\n", binary
->disasm_string
);
3694 ac_shader_binary_read_config(binary
, config
, 0, options
->supports_spill
);
3696 LLVMContextRef ctx
= LLVMGetModuleContext(llvm_module
);
3697 LLVMDisposeModule(llvm_module
);
3698 LLVMContextDispose(ctx
);
3700 if (stage
== MESA_SHADER_FRAGMENT
) {
3701 shader_info
->num_input_vgprs
= 0;
3702 if (G_0286CC_PERSP_SAMPLE_ENA(config
->spi_ps_input_addr
))
3703 shader_info
->num_input_vgprs
+= 2;
3704 if (G_0286CC_PERSP_CENTER_ENA(config
->spi_ps_input_addr
))
3705 shader_info
->num_input_vgprs
+= 2;
3706 if (G_0286CC_PERSP_CENTROID_ENA(config
->spi_ps_input_addr
))
3707 shader_info
->num_input_vgprs
+= 2;
3708 if (G_0286CC_PERSP_PULL_MODEL_ENA(config
->spi_ps_input_addr
))
3709 shader_info
->num_input_vgprs
+= 3;
3710 if (G_0286CC_LINEAR_SAMPLE_ENA(config
->spi_ps_input_addr
))
3711 shader_info
->num_input_vgprs
+= 2;
3712 if (G_0286CC_LINEAR_CENTER_ENA(config
->spi_ps_input_addr
))
3713 shader_info
->num_input_vgprs
+= 2;
3714 if (G_0286CC_LINEAR_CENTROID_ENA(config
->spi_ps_input_addr
))
3715 shader_info
->num_input_vgprs
+= 2;
3716 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config
->spi_ps_input_addr
))
3717 shader_info
->num_input_vgprs
+= 1;
3718 if (G_0286CC_POS_X_FLOAT_ENA(config
->spi_ps_input_addr
))
3719 shader_info
->num_input_vgprs
+= 1;
3720 if (G_0286CC_POS_Y_FLOAT_ENA(config
->spi_ps_input_addr
))
3721 shader_info
->num_input_vgprs
+= 1;
3722 if (G_0286CC_POS_Z_FLOAT_ENA(config
->spi_ps_input_addr
))
3723 shader_info
->num_input_vgprs
+= 1;
3724 if (G_0286CC_POS_W_FLOAT_ENA(config
->spi_ps_input_addr
))
3725 shader_info
->num_input_vgprs
+= 1;
3726 if (G_0286CC_FRONT_FACE_ENA(config
->spi_ps_input_addr
))
3727 shader_info
->num_input_vgprs
+= 1;
3728 if (G_0286CC_ANCILLARY_ENA(config
->spi_ps_input_addr
))
3729 shader_info
->num_input_vgprs
+= 1;
3730 if (G_0286CC_SAMPLE_COVERAGE_ENA(config
->spi_ps_input_addr
))
3731 shader_info
->num_input_vgprs
+= 1;
3732 if (G_0286CC_POS_FIXED_PT_ENA(config
->spi_ps_input_addr
))
3733 shader_info
->num_input_vgprs
+= 1;
3735 config
->num_vgprs
= MAX2(config
->num_vgprs
, shader_info
->num_input_vgprs
);
3737 /* +3 for scratch wave offset and VCC */
3738 config
->num_sgprs
= MAX2(config
->num_sgprs
,
3739 shader_info
->num_input_sgprs
+ 3);
3741 /* Enable 64-bit and 16-bit denormals, because there is no performance
3744 * If denormals are enabled, all floating-point output modifiers are
3747 * Don't enable denormals for 32-bit floats, because:
3748 * - Floating-point output modifiers would be ignored by the hw.
3749 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3750 * have to stop using those.
3751 * - SI & CI would be very slow.
3753 config
->float_mode
|= V_00B028_FP_64_DENORMS
;
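
/* Copy per-stage execution parameters from the NIR shader info (and the
 * pipeline key in options) into the radv shader variant info. */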
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}
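
/* Entry point: translate the NIR shader(s) to LLVM IR, compile the result to
 * a shader binary, and record the per-stage variant info. */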
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->info.so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		/* Skip streams that have no output components. */
		if (!num_components)
			continue;

		/* Only stream 0 matters when transform feedback is disabled. */
		if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->info.gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->gs_max_out_vertices * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, 1, 1, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->info.so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, false,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};

	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}