/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	/* gs_tg_info:
	 * - bits 0..10: ordered_wave_id
	 * - bits 12..20: number of vertices in group
	 * - bits 22..30: number of primitives in group
	 */
	LLVMValueRef gs_tg_info;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t float16_shaded_mask;

	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
struct ac_build_if_state {
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}

	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
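/* Number of patches per LS/HS threadgroup.  The limits applied below are, in
 * order: one wave per SIMD, the 32/64 KiB LDS budget, the off-chip tess
 * buffer size, a performance-motivated cap of 40, and a single-wave
 * workaround on GFX6.
 *
 * Illustrative numbers (not taken from the original source): with 3 input and
 * 3 output control points and 4 vec4 inputs per vertex, the wave limit alone
 * allows 64 / 3 * 4 = 84 patches, which the cap below reduces to at most 40.
 */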
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}

	return num_patches;
}
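/* Total LDS footprint of one LS/HS threadgroup: all TCS inputs followed by
 * all TCS outputs for ctx->tcs_num_patches patches.  All sizes here are in
 * bytes (16 bytes per vec4 slot).
 */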
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;

	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0		= get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0	= get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2		= get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2	= get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
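/* The helpers below return strides and offsets into this LDS layout.  Each of
 * them divides the byte size by 4 before building the constant, so the values
 * they return are in dwords, matching the dword-based addressing used by
 * ac_lds_load()/ac_lds_store().
 */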
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
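/* Argument bookkeeping for the LLVM "main" function: add_arg() records the
 * type and the destination pointer for each SGPR/VGPR parameter, and
 * assign_arguments() later writes the actual LLVM parameter values back
 * through those pointers once the function has been created.
 */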
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};
enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
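/* The set_loc* helpers record in radv_shader_variant_info which user SGPR
 * index each piece of user data (descriptor sets, push constants, vertex
 * buffers, ...) ends up in, so the driver knows where to emit the values at
 * draw/dispatch time.  Pointers occupy 1 or 2 SGPRs depending on whether
 * 32-bit pointers are used for that slot.
 */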
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
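/* Inline push constants: when the shader only uses a small, statically known
 * range of 32-bit push constants, the values can be passed directly in user
 * SGPRs instead of being loaded through the push-constant pointer.  The
 * helper below decides how many dwords to inline based on the SGPRs left
 * over once everything else has been assigned.
 */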
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->info.has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->info.has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->info.max_push_constant_used -
		 ctx->shader_info->info.min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->info.num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->info.loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->info.loads_push_constants = false;
	}

	ctx->shader_info->info.base_inline_push_consts =
		ctx->shader_info->info.min_push_constant_used / 4;
}
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count++;

	if (ctx->streamout_buffers)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
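/* Declares the user SGPR arguments shared by all stages: one 32-bit pointer
 * per used descriptor set (or a single pointer to an array of sets when they
 * are all indirected), the push-constant pointer, any inlined push constants
 * and the streamout buffer descriptor pointer.
 */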
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;

	if (ctx->shader_info->info.so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
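/* Vertex shader input VGPRs.  The hardware always provides the vertex id
 * first; where the instance id and the unused/user VGPRs land differs
 * between GFX10 and older chips and between the LS and non-LS layouts, which
 * is what the branches below encode.
 */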
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs_common_out.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		}
	}
}
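/* Streamout SGPRs are only declared on pre-GFX10 chips (the function below
 * returns early on GFX10).  For TES the streamout config reuses the SGPR
 * slot that was declared just before this call, hence the args->count - 1
 * rewrite instead of a new add_arg().
 */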
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	if (ctx->ac.chip_class >= GFX10)
		return;

	/* Streamout SGPRs. */
	if (ctx->shader_info->info.so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->info.so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->info.num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->info.num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}
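/* Builds the LLVM function signature for the stage: user SGPRs first
 * (rings/scratch, descriptor sets, push constants, stage-specific data), then
 * system SGPRs, then the input VGPRs, in the order the hardware expects.
 * For merged shaders (GFX9+ LS+HS / ES+GS, and NGG on GFX10) the previous
 * stage's arguments are folded into the same function.
 */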
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);

	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			previous_stage = stage;
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs_common_out.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			if (ctx->options->key.vs_common_out.as_ngg) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs_tg_info);
			} else {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs2vs_offset);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
		ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
		ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
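/* ac_shader_abi::load_resource callback: returns a pointer to the requested
 * binding within its descriptor set, except for dynamic buffers (which live
 * after the push constants) and inline uniform blocks (for which a buffer
 * descriptor is built on the fly).
 */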
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else {
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
	}

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 * - ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 * - ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 * - ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 * - ...
 *
 * Note that every attribute has 4 components.
 */
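/* Illustrative example of this addressing (numbers not taken from the
 * original source): with 2 patches, 3 vertices per patch and 2 per-vertex
 * attributes, get_tcs_tes_buffer_address() below places attribute a of
 * patch p, vertex v at byte offset 16 * (v + 3 * p + 3 * 2 * a), and
 * get_non_vertex_index_offset() puts the per-patch attributes right after
 * that block, at byte offset 16 * 2 * (3 * 2) = 192.
 */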
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index, stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), ac_glc, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), ac_glc, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;
		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
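/* The sample positions ring stores the 1x positions first, then 2x, 4x and
 * 8x, so (per the offsets above) the block for N samples starts at entry
 * N - 1.  load_sample_position() below adds that offset to the sample id
 * before indexing the ring.
 */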
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->info.ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}

static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    ac_glc | ac_slc, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}

static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}

static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}

static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}

static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}

static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}
		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}

/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}

static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		break;
	}

	return 4;
}

static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}

static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index,
						    buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format, 0, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}

static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}

static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}

static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
		}
	}

	ctx->output_mask |= mask_attribs;
}

/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			if (ctx->ac.chip_class >= GFX10) {
				args->enabled_channels = 0x3;
				args->out[0] = values[0];
				args->out[1] = values[3];
			} else {
				args->enabled_channels = 0x9;
				args->out[0] = values[0];
				args->out[3] = values[3];
			}
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16 or u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}

static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}

static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned loc = output->location;
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac,
				       radv_load_output(ctx, loc, start + i));
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc, false);
}

static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	struct ac_build_if_state if_ctx;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_nir_build_if(&if_ctx, ctx, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->info.so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
			struct radv_stream_output *output =
				&ctx->shader_info->info.so.outputs[i];

			if (stream != output->stream)
				continue;

			radv_emit_stream_output(ctx, so_buffers,
						so_write_offset, output);
		}
	}
	ac_nir_build_endif(&if_ctx);
}

static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for(unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}
		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	for(unsigned location = VARYING_SLOT_CLIP_DIST0; location <= VARYING_SLOT_CLIP_DIST1; ++location) {
		if (ctx->output_mask & (1ull << location)) {
			unsigned output_usage_mask, length;
			LLVMValueRef slots[4];
			unsigned j;

			if (ctx->stage == MESA_SHADER_VERTEX &&
			    !ctx->is_gs_copy_shader) {
				output_usage_mask =
					ctx->shader_info->info.vs.output_usage_mask[location];
			} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
				output_usage_mask =
					ctx->shader_info->info.tes.output_usage_mask[location];
			} else {
				assert(ctx->is_gs_copy_shader);
				output_usage_mask =
					ctx->shader_info->info.gs.output_usage_mask[location];
			}

			length = util_last_bit(output_usage_mask);

			for (j = 0; j < length; j++)
				slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, location, j));

			for (i = length; i < 4; i++)
				slots[i] = LLVMGetUndef(ctx->ac.f32);

			unsigned index = 2 + (location - VARYING_SLOT_CLIP_DIST0);
			si_llvm_init_export_args(ctx, &slots[0], 0xf,
						 V_008DFC_SQ_EXP_POS + index,
						 &pos_args[index]);
		}
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (ctx->shader_info->info.so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.family == CHIP_NAVI10 ||
	    ctx->ac.family == CHIP_NAVI12 ||
	    ctx->ac.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i != VARYING_SLOT_CLIP_DIST0 &&
		    i != VARYING_SLOT_CLIP_DIST1 &&
		    i < VARYING_SLOT_VAR0)
			continue;

		if ((i == VARYING_SLOT_CLIP_DIST0 ||
		     i == VARYING_SLOT_CLIP_DIST1) && !export_clip_dists)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}

static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index, max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32, j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    ac_glc | ac_slc, true);
			}
		}
	}
}

static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}

static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct radv_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}

static void
handle_ngg_outputs_post(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_build_if_state if_state;
	unsigned num_vertices = 3;
	LLVMValueRef tmp;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);

	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
	};

	/* TODO: streamout */

	/* TODO: VS primitive ID */
	if (ctx->options->key.vs_common_out.export_prim_id)
		assert(0);

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_nir_build_if(&if_state, ctx, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_nir_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	ac_nir_build_if(&if_state, ctx, is_es_thread);
	{
		handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
				       ctx->options->key.vs_common_out.export_clip_dists,
				       ctx->stage == MESA_SHADER_TESS_EVAL ? &ctx->shader_info->tes.outinfo : &ctx->shader_info->vs.outinfo);
	}
	ac_nir_build_endif(&if_state);
}

static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	// LINES reversal
	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, ac_glc, false);

		param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_inner, 0));

		inner_vec = inner_comps == 1 ? inner[0] :
			ac_build_gather_values(&ctx->ac, inner, inner_comps);
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
					    inner_comps, tf_inner_offset,
					    ctx->oc_lds, 0, ac_glc, false);
	}

	ac_nir_build_endif(&if_ctx);
}

static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}

static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}

static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}

static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}

static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls ||
		    ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}

static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs_common_out.as_es || ctx->options->key.vs_common_out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = 64;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->info.gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->gs_max_out_vertices;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * 64;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}

static unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}

static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}

3669 LLVMModuleRef
ac_translate_nir_to_llvm(struct ac_llvm_compiler
*ac_llvm
,
3670 struct nir_shader
*const *shaders
,
3672 struct radv_shader_variant_info
*shader_info
,
3673 const struct radv_nir_compiler_options
*options
)
3675 struct radv_shader_context ctx
= {0};
3677 ctx
.options
= options
;
3678 ctx
.shader_info
= shader_info
;
3680 ac_llvm_context_init(&ctx
.ac
, options
->chip_class
, options
->family
);
3681 ctx
.context
= ctx
.ac
.context
;
3682 ctx
.ac
.module
= ac_create_module(ac_llvm
->tm
, ctx
.context
);
3684 enum ac_float_mode float_mode
=
3685 options
->unsafe_math
? AC_FLOAT_MODE_UNSAFE_FP_MATH
:
3686 AC_FLOAT_MODE_DEFAULT
;
3688 ctx
.ac
.builder
= ac_create_builder(ctx
.context
, float_mode
);

	radv_nir_shader_info_init(&shader_info->info);

	for(int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < RADV_UD_MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      radv_nir_get_max_workgroup_size(ctx.options->chip_class,
									      shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(shaders[0]->info.stage) &&
		    options->key.vs_common_out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;

	/* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
	 * we fall back to the old intrinsics for atomic buffer image operations
	 * and thus we need to apply the indexing workaround...
	 */
	ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if ((ctx.ac.family == CHIP_VEGA10 ||
	     ctx.ac.family == CHIP_RAVEN) &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (i)
			ac_emit_barrier(&ctx.ac, ctx.stage);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					   shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
						 shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			prepare_interp_optimize(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
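
/* LLVM diagnostic callback: report errors to stderr and record them in the
 * unsigned value pointed to by 'context' so the caller can fail the compile.
 */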
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}
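
/* Run the LLVM backend on the module; the resulting ELF is returned through
 * pelf_buffer/pelf_size and a non-zero return value indicates failure.
 */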
static unsigned radv_llvm_compile(LLVMModuleRef M,
				  char **pelf_buffer, size_t *pelf_size,
				  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;
	return retval;
}
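
/* Compile an LLVM module to ELF, optionally keep the LLVM IR text, and wrap
 * everything in a freshly allocated radv_shader_binary_rtld returned via
 * rbinary.
 */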
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct radv_shader_binary **rbinary,
				   struct radv_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;
	if (options->dump_shader)
		ac_dump_module(llvm_module);

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	free(llvm_ir_string);
	free(elf_buffer);
}
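
/* Copy per-stage execution parameters from the NIR shader info and the
 * compile key into the radv_shader_variant_info.
 */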
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.vs_common_out.as_es;
		shader_info->tes.export_prim_id = options->key.vs_common_out.export_prim_id;
		shader_info->is_ngg = options->key.vs_common_out.as_ngg;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs_common_out.as_es;
		shader_info->vs.as_ls = options->key.vs_common_out.as_ls;
		shader_info->vs.export_prim_id = options->key.vs_common_out.export_prim_id;
		shader_info->is_ngg = options->key.vs_common_out.as_ngg;
		break;
	default:
		break;
	}
}
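
/* Entry point for compiling a (possibly merged) set of NIR shaders: translate
 * them to a single LLVM module, compile it to a shader binary and fill in the
 * per-stage shader info.
 */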
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct radv_shader_binary **rbinary,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary, shader_info,
			       nir[nir_count - 1]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class >= GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
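
/* Emit the body of the GS copy shader: switch on the vertex stream id, read
 * the stream's outputs back from the GSVS ring and export them like a regular
 * vertex shader, including streamout when it is enabled.
 */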
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->info.so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->info.gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->gs_max_out_vertices * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->info.so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, true,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
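
/* Build and compile the VS-stage copy shader that reads the GSVS ring written
 * by the geometry shader and performs the actual exports.
 */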
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct radv_shader_binary **rbinary,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary, shader_info,
			       MESA_SHADER_VERTEX, options);
	(*rbinary)->is_gs_copy_shader = true;
}