/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"
#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u, tes_v;

	/* On GFX10:
	 * - bits 0..10: ordered_wave_id
	 * - bits 12..20: number of vertices in group
	 * - bits 22..30: number of primitives in group
	 */
	LLVMValueRef gs_tg_info;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t float16_shaded_mask;

	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
struct ac_build_if_state
{
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup is at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2			= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0			= get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0		= get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2			= get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2		= get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
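/* Concrete (hypothetical) instance of the layout above: with
 * input_patch_size = 192 bytes, pervertex_output_patch_size = 192 bytes,
 * output_patch_size = 224 bytes and 40 patches, the helpers below return
 * (in dwords, i.e. bytes / 4):
 *   get_tcs_in_patch_stride()              = 192 / 4              = 48
 *   get_tcs_out_patch_stride()             = 224 / 4              = 56
 *   get_tcs_out_patch0_offset()            = 192 * 40 / 4         = 1920
 *   get_tcs_out_patch0_patch_data_offset() = (192 * 40 + 192) / 4 = 1968
 */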
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
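/* Illustration (not from the original source): user SGPR locations are
 * handed out by walking *sgpr_idx forward.  A 32-bit pointer consumes one
 * SGPR, while the 64-bit scratch ring pointer consumes two, e.g.
 *   sgpr_idx = 0;
 *   set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS, &sgpr_idx);  // idx 0, sgpr_idx -> 2
 *   set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &sgpr_idx);        // idx 2, sgpr_idx -> 3
 */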
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.out.as_es && !ctx->options->key.vs.out.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.out.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->info.has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->info.has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->info.max_push_constant_used -
		 ctx->shader_info->info.min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->info.num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->info.loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->info.loads_push_constants = false;
	}

	ctx->shader_info->info.base_inline_push_consts =
		ctx->shader_info->info.min_push_constant_used / 4;
}
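/* Hypothetical example of the sizing above: a shader that only reads push
 * constant bytes [16, 32) has num_push_consts = (32 - 16) / 4 = 4.  With at
 * least 4 remaining user SGPRs (and AC_MAX_INLINE_PUSH_CONSTS >= 4) all four
 * dwords are inlined, base_inline_push_consts = 16 / 4 = 4, and - provided
 * the shader does not load dynamic offsets - the pointer-based push-constant
 * path is disabled entirely.
 */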
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count++;

	if (ctx->streamout_buffers)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;

	if (ctx->shader_info->info.so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.out.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		}
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	if (ctx->ac.chip_class >= GFX10)
		return;

	/* Streamout SGPRs. */
	if (ctx->shader_info->info.so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->info.so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->info.num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->info.num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	if (ctx->ac.chip_class >= GFX10) {
		if (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
			previous_stage = MESA_SHADER_VERTEX;
		}
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs.out.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			if (ctx->options->key.vs.out.as_ngg) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs_tg_info);
			} else {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs2vs_offset);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
		ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
		ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front
	 * (including the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else {
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
	}

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
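/* Hypothetical example of the addressing that follows: with 4 output
 * attributes (output_vertex_size = 64 bytes), 3 vertices per patch and 40
 * patches, get_tcs_tes_buffer_address() computes, for per-vertex accesses,
 *   base = ((rel_patch_id * 3 + vertex_index) + param_index * 120) * 16
 * and for per-patch data it additionally adds
 *   get_non_vertex_index_offset() = 192 * 40 = 7680 bytes.
 */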
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index, stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), ac_glc, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), ac_glc, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
1786 static LLVMValueRef
load_sample_position(struct ac_shader_abi
*abi
,
1787 LLVMValueRef sample_id
)
1789 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1791 LLVMValueRef result
;
1792 LLVMValueRef index
= LLVMConstInt(ctx
->ac
.i32
, RING_PS_SAMPLE_POSITIONS
, false);
1793 LLVMValueRef ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ctx
->ring_offsets
, &index
, 1, "");
1795 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
1796 ac_array_in_const_addr_space(ctx
->ac
.v2f32
), "");
1798 uint32_t sample_pos_offset
=
1799 radv_get_sample_pos_offset(ctx
->options
->key
.fs
.num_samples
);
1802 LLVMBuildAdd(ctx
->ac
.builder
, sample_id
,
1803 LLVMConstInt(ctx
->ac
.i32
, sample_pos_offset
, false), "");
1804 result
= ac_build_load_invariant(&ctx
->ac
, ptr
, sample_id
);
1810 static LLVMValueRef
load_sample_mask_in(struct ac_shader_abi
*abi
)
1812 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1813 uint8_t log2_ps_iter_samples
;
1815 if (ctx
->shader_info
->info
.ps
.force_persample
) {
1816 log2_ps_iter_samples
=
1817 util_logbase2(ctx
->options
->key
.fs
.num_samples
);
1819 log2_ps_iter_samples
= ctx
->options
->key
.fs
.log2_ps_iter_samples
;
1822 /* The bit pattern matches that used by fixed function fragment
1824 static const uint16_t ps_iter_masks
[] = {
1825 0xffff, /* not used */
1831 assert(log2_ps_iter_samples
< ARRAY_SIZE(ps_iter_masks
));
1833 uint32_t ps_iter_mask
= ps_iter_masks
[log2_ps_iter_samples
];
1835 LLVMValueRef result
, sample_id
;
1836 sample_id
= ac_unpack_param(&ctx
->ac
, abi
->ancillary
, 8, 4);
1837 sample_id
= LLVMBuildShl(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, ps_iter_mask
, false), sample_id
, "");
1838 result
= LLVMBuildAnd(ctx
->ac
.builder
, sample_id
, abi
->sample_coverage
, "");
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    ac_glc | ac_slc, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}

static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}

static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}

static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}
static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
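
/* Descriptors are fetched from the per-set descriptor list: start at the
 * binding's byte offset, add constant_index * stride for arrayed bindings,
 * and load the v4i32/v8i32 descriptor into SGPRs. Immutable samplers are
 * materialized as constants instead of being loaded from memory.
 */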
static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
					  unsigned descriptor_set,
					  unsigned base_index,
					  unsigned constant_index,
					  LLVMValueRef index,
					  enum ac_descriptor_type desc_type,
					  bool image, bool write,
					  bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size = 0;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		type_size = 16;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW.
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}

static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		break;
	}

	return 4;
}
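
/* A vertex fetch may return fewer channels than the shader reads; the helper
 * below pads the result to a vec4 with 0 in .yz and 1 in .w (integer or
 * float depending on the attribute's number format).
 */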
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}
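
/* Vertex inputs are lowered here: compute the per-attribute buffer index
 * (instance rate and divisors included), load the vertex buffer descriptor,
 * do a typed buffer load, then apply post-shuffle and alpha-adjust fixups
 * before storing the channels into ctx->inputs.
 */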
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1)
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else {
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");
		}

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index, buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format, 0, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}
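
/* interp_fs_input() emits fs.interp (or fs.constant for flat inputs) for the
 * four channels of one attribute, using the I/J coordinates selected by
 * lookup_interp_param(); 16-bit inputs use the f16 variant and flat 16-bit
 * inputs are truncated back to 16 bits.
 */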
static void interp_fs_input(struct radv_shader_context *ctx,
			    unsigned attr,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    bool float16,
			    LLVMValueRef result[4])
{
	LLVMValueRef attr_number;
	unsigned chan;
	LLVMValueRef i, j;
	bool interp = !LLVMIsUndef(interp_param);

	attr_number = LLVMConstInt(ctx->ac.i32, attr, false);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	if (interp) {
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						ctx->ac.v2f32, "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_1, "");
	}

	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);

		if (interp && float16) {
			result[chan] = ac_build_fs_interp_f16(&ctx->ac,
							      llvm_chan,
							      attr_number,
							      prim_mask, i, j);
		} else if (interp) {
			result[chan] = ac_build_fs_interp(&ctx->ac,
							  llvm_chan,
							  attr_number,
							  prim_mask, i, j);
		} else {
			result[chan] = ac_build_fs_interp_mov(&ctx->ac,
							      LLVMConstInt(ctx->ac.i32, 2, false),
							      llvm_chan,
							      attr_number,
							      prim_mask);
			result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
			result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], float16 ? ctx->ac.i16 : ctx->ac.i32, "");
		}
	}
}

static void mark_16bit_fs_input(struct radv_shader_context *ctx,
				const struct glsl_type *type,
				unsigned location)
{
	if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		if (glsl_type_is_16bit(type)) {
			ctx->float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
		}
	} else if (glsl_type_is_array(type)) {
		unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
		for (unsigned i = 0; i < glsl_get_length(type); ++i) {
			mark_16bit_fs_input(ctx, glsl_get_array_element(type), location + i * stride);
		}
	} else {
		assert(glsl_type_is_struct_or_ifc(type));
		for (unsigned i = 0; i < glsl_get_length(type); i++) {
			mark_16bit_fs_input(ctx, glsl_get_struct_field(type, i), location);
			location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
		}
	}
}

static void
handle_fs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	int idx = variable->data.location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	LLVMValueRef interp = NULL;
	uint64_t mask;

	variable->data.driver_location = idx * 4;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	} else {
		mark_16bit_fs_input(ctx, variable->type, idx);
	}

	mask = ((1ull << attrib_count) - 1) << variable->data.location;

	if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT ||
	    glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT16 ||
	    glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_STRUCT) {
		unsigned interp_type;
		if (variable->data.sample)
			interp_type = INTERP_SAMPLE;
		else if (variable->data.centroid)
			interp_type = INTERP_CENTROID;
		else
			interp_type = INTERP_CENTER;

		interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
	}
	if (interp == NULL)
		interp = LLVMGetUndef(ctx->ac.i32);

	for (unsigned i = 0; i < attrib_count; ++i)
		ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;

	ctx->input_mask |= mask;
}
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}

static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}
static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index ||
	    ctx->shader_info->info.ps.layer_input) {
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
		ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
	}

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			bool float16 = (ctx->float16_shaded_mask >> i) & 1;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask, float16,
					inputs);

			if (LLVMIsUndef(interp_param))
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			if (float16)
				ctx->shader_info->fs.float16_shaded_mask |= 1u << index;
			if (i >= VARYING_SLOT_VAR0)
				ctx->abi.fs_input_attr_indices[i - VARYING_SLOT_VAR0] = index;
			++index;
		} else if (i == VARYING_SLOT_CLIP_DIST0) {
			int length = ctx->shader_info->info.ps.num_input_clips_culls;

			for (unsigned j = 0; j < length; j += 4) {
				inputs = ctx->inputs + ac_llvm_reg_index_soa(i, j);

				interp_param = *inputs;
				interp_fs_input(ctx, index, interp_param,
						ctx->abi.prim_mask, false, inputs);
				++index;
			}
		} else if (i == VARYING_SLOT_POS) {
			for (int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}

static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
		}
	}

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;
		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;
		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;
		case V_028714_SPI_SHADER_32_AR:
			if (ctx->ac.chip_class >= GFX10) {
				args->enabled_channels = 0x3;
				args->out[0] = values[0];
				args->out[1] = values[3];
			} else {
				args->enabled_channels = 0x9;
				args->out[0] = values[0];
				args->out[3] = values[3];
			}
			break;
		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan], ctx->ac.f32, "");
			}
			break;
		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;
		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;
		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;
		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16/u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
	} else if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else {
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
	}

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned loc = output->location;
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac,
				       radv_load_output(ctx, loc, start + i));
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc, false);
}

static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	struct ac_build_if_state if_ctx;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_nir_build_if(&if_ctx, ctx, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->info.so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
			struct radv_stream_output *output =
				&ctx->shader_info->info.so.outputs[i];

			if (stream != output->stream)
				continue;

			radv_emit_stream_output(ctx, so_buffers,
						so_write_offset, output);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id, bool export_layer_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned target;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for(unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}
		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	for(unsigned location = VARYING_SLOT_CLIP_DIST0; location <= VARYING_SLOT_CLIP_DIST1; ++location) {
		if (ctx->output_mask & (1ull << location)) {
			unsigned output_usage_mask, length;
			LLVMValueRef slots[4];
			unsigned j;

			if (ctx->stage == MESA_SHADER_VERTEX &&
			    !ctx->is_gs_copy_shader) {
				output_usage_mask =
					ctx->shader_info->info.vs.output_usage_mask[location];
			} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
				output_usage_mask =
					ctx->shader_info->info.tes.output_usage_mask[location];
			} else {
				assert(ctx->is_gs_copy_shader);
				output_usage_mask =
					ctx->shader_info->info.gs.output_usage_mask[location];
			}

			length = util_last_bit(output_usage_mask);

			for (j = 0; j < length; j++)
				slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, location, j));

			for (i = length; i < 4; i++)
				slots[i] = LLVMGetUndef(ctx->ac.f32);

			target = V_008DFC_SQ_EXP_POS + 2 + (location - VARYING_SLOT_CLIP_DIST0);
			si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));

			if (export_clip_dists) {
				/* Export the clip/cull distances values to the next stage. */
				radv_export_param(ctx, param_count, &slots[0], 0xf);
				outinfo->vs_output_param_offset[location] = param_count++;
			}
		}
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (ctx->shader_info->info.so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.family == CHIP_NAVI10 ||
	    ctx->ac.family == CHIP_NAVI12 ||
	    ctx->ac.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;
		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i < VARYING_SLOT_VAR0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	if (export_layer_id && layer_value) {
		LLVMValueRef values[4];

		values[0] = layer_value;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}
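
/* ES outputs: on GFX9+ the ES and GS stages are merged, so outputs are
 * written to LDS at lds_base + param_index * 4 dwords; on older chips they
 * are stored to the ESGS ring buffer at es2gs_offset instead.
 */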
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index, max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32, j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    ac_glc | ac_slc, true);
			}
		}
	}
}
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct radv_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
static void
handle_ngg_outputs_post(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_build_if_state if_state;
	unsigned num_vertices = 3;
	LLVMValueRef tmp;

	assert(ctx->stage == MESA_SHADER_VERTEX && !ctx->is_gs_copy_shader);

	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
	};

	/* TODO: streamout */

	/* TODO: VS primitive ID */
	if (ctx->options->key.vs.out.export_prim_id)
		assert(0);

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */
	/* Export primitive data to the index buffer. Format is:
	 *  - bits 0..8: index 0
	 *  - bit 9: edge flag 0
	 *  - bits 10..18: index 1
	 *  - bit 19: edge flag 1
	 *  - bits 20..28: index 2
	 *  - bit 29: edge flag 2
	 *  - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_nir_build_if(&if_state, ctx, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_nir_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	ac_nir_build_if(&if_state, ctx, is_es_thread);
	{
		handle_vs_outputs_post(ctx, ctx->options->key.vs.out.export_prim_id,
				       ctx->options->key.vs.out.export_layer_id,
				       ctx->options->key.vs.out.export_clip_dists,
				       &ctx->shader_info->vs.outinfo);
	}
	ac_nir_build_endif(&if_state);
}
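
/* Tessellation factors: invocation 0 of each patch gathers the outer/inner
 * levels from LDS and writes them to the tess factor ring (plus the dynamic
 * HS control word on GFX8 and older); they are also copied to the off-chip
 * ring when the TES actually reads them.
 */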
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;
	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, ac_glc, false);
		if (inner_comps) {
			param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ctx->oc_lds, 0, ac_glc, false);
		}
	}

	ac_nir_build_endif(&if_ctx);
}
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.out.as_ngg)
			break; /* handled outside of the shader body */
		else if (ctx->options->key.vs.out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.out.export_prim_id,
					       ctx->options->key.vs.out.export_layer_id,
					       ctx->options->key.vs.out.export_clip_dists,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.out.export_prim_id,
					       ctx->options->key.tes.out.export_layer_id,
					       ctx->options->key.tes.out.export_clip_dists,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.out.as_ls ||
		    ctx->options->key.vs.out.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs.out.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
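
/* Load the ring buffer descriptors (ESGS, GSVS, tess factor/offchip) from
 * the ring_offsets buffer and override the GSVS descriptor's base, stride
 * and num_records for each vertex stream.
 */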
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs.out.as_es || ctx->options->key.tes.out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = 64;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->info.gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->gs_max_out_vertices;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * 64;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
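
/* Upper bound on the number of invocations per workgroup for a stage;
 * compute shaders use their declared local size.
 */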
unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
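
/* Unpack the six per-vertex ESGS offsets (two 16-bit values packed per input
 * VGPR) and the GS wave id from merged_wave_info.
 */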
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}
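
/* Translate one or more NIR shaders (more than one when stages are merged on
 * GFX9+) into a single LLVM module.
 */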
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                       struct nir_shader *const *shaders,
                                       int shader_count,
                                       struct radv_shader_variant_info *shader_info,
                                       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);

	memset(shader_info, 0, sizeof(*shader_info));

	radv_nir_shader_info_init(&shader_info->info);

	for(int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < RADV_UD_MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
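
	/* The workgroup size needs to cover all the (possibly merged) stages. */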
	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
		                              radv_nir_get_max_workgroup_size(ctx.options->chip_class,
		                                                              shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (shaders[0]->info.stage == MESA_SHADER_VERTEX &&
		    options->key.vs.out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
	                shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;

	/* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
	 * we fallback to the old intrinsics for atomic buffer image operations
	 * and thus we need to apply the indexing workaround...
	 */
	ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs.out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if ((ctx.ac.family == CHIP_VEGA10 ||
	     ctx.ac.family == CHIP_RAVEN) &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);
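
	/* Translate the shaders in pipeline order.  For merged stages, each
	 * shader body is wrapped in a conditional on this wave's thread count
	 * (taken from merged_wave_info below).
	 */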
	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (i)
			ac_emit_barrier(&ctx.ac, ctx.stage);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
				shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
			                                  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    ctx.options->key.vs.out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
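
/* LLVM diagnostic callback: print errors to stderr and flag the failure
 * through the user context pointer.
 */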
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}
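
/* Compile the module to an ELF binary.  Returns non-zero on failure. */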
static unsigned radv_llvm_compile(LLVMModuleRef M,
                                  char **pelf_buffer, size_t *pelf_size,
                                  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;
	return retval;
}
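
/* Compile the LLVM module and wrap the resulting ELF (plus the recorded
 * LLVM IR, if requested) into a radv_shader_binary_rtld.
 */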
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
                                   LLVMModuleRef llvm_module,
                                   struct radv_shader_binary **rbinary,
                                   struct radv_shader_variant_info *shader_info,
                                   gl_shader_stage stage,
                                   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;

	if (options->dump_shader)
		ac_dump_module(llvm_module);

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	free(llvm_ir_string);
}
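
/* Copy stage-specific execution info from the NIR shader and the pipeline
 * key into the shader variant info.
 */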
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.out.as_es;
		shader_info->tes.export_prim_id = options->key.tes.out.export_prim_id;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.out.as_es;
		shader_info->vs.as_ls = options->key.vs.out.as_ls;
		shader_info->vs.export_prim_id = options->key.vs.out.export_prim_id;
		shader_info->is_ngg = options->key.vs.out.as_ngg;
		break;
	default:
		break;
	}
}
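
/* Main entry point: translate the NIR shaders to LLVM IR and compile the
 * result into a shader binary.
 */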
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct radv_shader_binary **rbinary,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{

	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary, shader_info,
			       nir[nir_count - 1]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
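
/* Body of the GS copy shader: read the selected vertex stream back from the
 * GSVS ring, emit streamout if needed and export the outputs like a regular
 * hardware VS.
 */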
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->info.so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->info.gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->gs_max_out_vertices * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->info.so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, false, true,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
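
/* Build and compile the GS copy shader, which runs as a hardware VS and
 * copies the GS outputs from the GSVS ring to the export path.
 */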
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct radv_shader_binary **rbinary,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary, shader_info,
			       MESA_SHADER_VERTEX, options);
	(*rbinary)->is_gs_copy_shader = true;
}