/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"
#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u, tes_v;

	/* On GFX10:
	 * - bits 0..10: ordered_wave_id
	 * - bits 12..20: number of vertices in group
	 * - bits 22..30: number of primitives in group
	 */
	LLVMValueRef gs_tg_info;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t float16_shaded_mask;

	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef gs_curprim_verts[4];
	LLVMValueRef gs_generated_prims[4];
	LLVMValueRef gs_ngg_emit;
	LLVMValueRef gs_ngg_scratch;
	unsigned gs_max_out_vertices;
	unsigned gs_output_prim;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;

	LLVMValueRef vertexptr; /* GFX10 only */
};
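/* Illustrative sketch (not part of the original file): the gs_tg_info
 * bit-fields documented above can be extracted with ac_unpack_param,
 * the same helper get_rel_patch_id() uses below, e.g.:
 *
 *   LLVMValueRef ordered_wave_id = ac_unpack_param(&ctx->ac, ctx->gs_tg_info, 0, 11);
 *   LLVMValueRef vtx_cnt_in_tg   = ac_unpack_param(&ctx->ac, ctx->gs_tg_info, 12, 9);
 *   LLVMValueRef prim_cnt_in_tg  = ac_unpack_param(&ctx->ac, ctx->gs_tg_info, 22, 9);
 */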
struct radv_shader_output_values {
	LLVMValueRef values[4];
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
struct ac_build_if_state
{
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = ctx->options->wave_size / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
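/* Worked example (illustrative numbers, not from the original file): with
 * 3 input and 4 output control points, num_patches starts at
 * 64 / MAX2(3, 4) * 4 = 64.  With 8 TCS inputs (input_patch_size =
 * 3 * 8 * 16 = 384 bytes) plus 8 per-vertex and 2 per-patch outputs
 * (output_patch_size = 4 * 8 * 16 + 2 * 16 = 544 bytes), 64 KiB of LDS
 * allows 65536 / (384 + 544) = 70 patches, so the LDS limit does not
 * bind; assuming the off-chip buffer limit does not bind either, the
 * final clamp to 40 patches wins.
 */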
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;

	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
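/* Illustrative offset computation (not from the original file): with
 * P = tcs_num_patches, I = input_patch_size and O = output_patch_size
 * (both in bytes), the layout above gives
 *
 *   TCS inputs for patch i            : i * I
 *   TCS outputs for patch j           : P * I + j * O
 *   per-patch TCS outputs for patch j : P * I + j * O + pervertex_output_patch_size
 *
 * which is what get_tcs_out_patch0_offset and
 * get_tcs_out_patch0_patch_data_offset compute below, divided by 4
 * because LDS addresses here are in dwords.
 */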
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
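/* Usage sketch for the arg_info helpers (illustrative only; 'my_sgpr' is a
 * hypothetical name, and this simply mirrors what create_function does
 * further below):
 *
 *   struct arg_info args = {};
 *   LLVMValueRef my_sgpr;
 *   add_arg(&args, ARG_SGPR, ctx->ac.i32, &my_sgpr);
 *   LLVMValueRef fn = create_llvm_function(ctx->context, ctx->ac.module,
 *                                          ctx->ac.builder, NULL, 0, &args,
 *                                          ctx->max_workgroup_size,
 *                                          ctx->options);
 *   assign_arguments(fn, &args);  // my_sgpr now holds the LLVM parameter
 */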
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}

	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->info.has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->info.has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->info.max_push_constant_used -
		 ctx->shader_info->info.min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->info.num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->info.loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->info.loads_push_constants = false;
	}

	ctx->shader_info->info.base_inline_push_consts =
		ctx->shader_info->info.min_push_constant_used / 4;
}
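/* Worked example (illustrative): if a shader reads 16 bytes of push
 * constants starting at byte offset 4 (min_push_constant_used = 4,
 * max_push_constant_used = 20), then (20 - 4) / 4 = 4 dwords are inlined
 * into user SGPRs (budget permitting), starting at
 * base_inline_push_consts = 4 / 4 = 1.
 */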
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count++;

	if (ctx->streamout_buffers)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;

	if (ctx->shader_info->info.so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs_common_out.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				if (ctx->options->key.vs_common_out.as_ngg) {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				} else {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				}
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		}
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	/* Streamout SGPRs. */
	if (ctx->shader_info->info.so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->info.so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->info.num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->info.num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);

	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			previous_stage = stage;
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs_common_out.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			if (ctx->options->key.vs_common_out.as_ngg) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs_tg_info);
			} else {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs2vs_offset);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (ctx->ac.chip_class >= GFX10) {
			desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				     S_008F0C_OOB_SELECT(3) |
				     S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
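/* Illustrative address computation (not from the original file): for the
 * per-vertex region, get_tcs_tes_buffer_address below evaluates to
 *
 *   16 * (param * (vertices_per_patch * num_patches)
 *         + rel_patch_id * vertices_per_patch + vertex_index)
 *
 * i.e. attributes are the outermost dimension, then patches, then
 * vertices, matching the layout above.  Per-patch attributes use
 * num_patches as the stride instead and are placed after all per-vertex
 * data (see get_non_vertex_index_offset).
 */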
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), ac_glc, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), ac_glc, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;
		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
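/* Explanatory note (added): these offsets are consistent with a
 * sample-positions buffer that packs the 1x position at element 0, the 2x
 * positions at elements 1..2, the 4x positions at 3..6 and the 8x
 * positions at 7..14, hence the cumulative start offsets 1, 3 and 7 used
 * by load_sample_position below.
 */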
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
load_sample_mask_in(struct ac_shader_abi
*abi
)
1842 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1843 uint8_t log2_ps_iter_samples
;
1845 if (ctx
->shader_info
->info
.ps
.force_persample
) {
1846 log2_ps_iter_samples
=
1847 util_logbase2(ctx
->options
->key
.fs
.num_samples
);
1849 log2_ps_iter_samples
= ctx
->options
->key
.fs
.log2_ps_iter_samples
;
1852 /* The bit pattern matches that used by fixed function fragment
1854 static const uint16_t ps_iter_masks
[] = {
1855 0xffff, /* not used */
1861 assert(log2_ps_iter_samples
< ARRAY_SIZE(ps_iter_masks
));
1863 uint32_t ps_iter_mask
= ps_iter_masks
[log2_ps_iter_samples
];
1865 LLVMValueRef result
, sample_id
;
1866 sample_id
= ac_unpack_param(&ctx
->ac
, abi
->ancillary
, 8, 4);
1867 sample_id
= LLVMBuildShl(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, ps_iter_mask
, false), sample_id
, "");
1868 result
= LLVMBuildAnd(ctx
->ac
.builder
, sample_id
, abi
->sample_coverage
, "");
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs);

static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
		return;
	}

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    ac_glc | ac_slc, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
		return;
	}

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* The first two initializers were elided in the damaged source; they
	 * are reconstructed here as the u/v tess coords of the context. */
	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}

static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}
static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi *abi,
	       LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}
		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 16;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}
static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		return 4;
	}
}
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index,
						    buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format, 0, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}
static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has it's own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_GEOMETRY) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}

			mask_attribs = 1ull << idx;
		}
	}

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			if (ctx->ac.chip_class >= GFX10) {
				args->enabled_channels = 0x3;
				args->out[0] = values[0];
				args->out[1] = values[3];
			} else {
				args->enabled_channels = 0x9;
				args->out[0] = values[0];
				args->out[3] = values[3];
			}
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16/u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output = ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output,
			struct radv_shader_output_values *shader_out)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac, shader_out->values[start + i]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc, false);
}
static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	struct ac_build_if_state if_ctx;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_nir_build_if(&if_ctx, ctx, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->info.so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
			struct radv_shader_output_values shader_out = {};
			struct radv_stream_output *output =
				&ctx->shader_info->info.so.outputs[i];

			if (stream != output->stream)
				continue;

			for (int j = 0; j < 4; j++) {
				shader_out.values[j] =
					radv_load_output(ctx, output->location, j);
			}

			radv_emit_stream_output(ctx, so_buffers, so_write_offset,
						output, &shader_out);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
static void
radv_build_param_exports(struct radv_shader_context *ctx,
			 struct radv_shader_output_values *outputs,
			 unsigned noutput,
			 struct radv_vs_output_info *outinfo,
			 bool export_clip_dists)
{
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned slot_name = outputs[i].slot_name;
		unsigned usage_mask = outputs[i].usage_mask;

		if (slot_name != VARYING_SLOT_LAYER &&
		    slot_name != VARYING_SLOT_PRIMITIVE_ID &&
		    slot_name != VARYING_SLOT_CLIP_DIST0 &&
		    slot_name != VARYING_SLOT_CLIP_DIST1 &&
		    slot_name < VARYING_SLOT_VAR0)
			continue;

		if ((slot_name == VARYING_SLOT_CLIP_DIST0 ||
		     slot_name == VARYING_SLOT_CLIP_DIST1) && !export_clip_dists)
			continue;

		radv_export_param(ctx, param_count, outputs[i].values, usage_mask);

		assert(i < ARRAY_SIZE(outinfo->vs_output_param_offset));
		outinfo->vs_output_param_offset[slot_name] = param_count++;
	}

	outinfo->param_exports = param_count;
}
/* Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 */
static void
radv_llvm_export_vs(struct radv_shader_context *ctx,
		    struct radv_shader_output_values *outputs,
		    unsigned noutput,
		    struct radv_vs_output_info *outinfo,
		    bool export_clip_dists)
{
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
	struct ac_export_args pos_args[4] = {};
	unsigned pos_idx, index;
	int i;

	/* Build position exports */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].slot_name) {
		case VARYING_SLOT_POS:
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case VARYING_SLOT_PSIZ:
			psize_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_VIEWPORT:
			viewport_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
			index = 2 + outputs[i].slot_index;
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS + index,
						 &pos_args[index]);
			break;
		default:
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			outinfo->pos_exports++;
	}

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.family == CHIP_NAVI10 ||
	    ctx->ac.family == CHIP_NAVI12 ||
	    ctx->ac.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == outinfo->pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports */
	radv_build_param_exports(ctx, outputs, noutput, outinfo, export_clip_dists);
}
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	struct radv_shader_output_values *outputs;
	unsigned noutput = 0;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef *tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for (unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));
	outinfo->pos_exports = 0;

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
	}

	if (ctx->shader_info->info.so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	/* Allocate a temporary array for the output values. */
	unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_prim_id;
	outputs = malloc(num_outputs * sizeof(outputs[0]));

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		outputs[noutput].slot_name = i;
		outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			outputs[noutput].usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			outputs[noutput].usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			outputs[noutput].usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		for (unsigned j = 0; j < 4; j++) {
			outputs[noutput].values[j] =
				ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
		}

		noutput++;
	}

	/* Export PrimitiveID. */
	if (export_prim_id) {
		outinfo->export_prim_id = true;

		outputs[noutput].slot_name = VARYING_SLOT_PRIMITIVE_ID;
		outputs[noutput].slot_index = 0;
		outputs[noutput].usage_mask = 0x1;
		outputs[noutput].values[0] = ctx->vs_prim_id;
		for (unsigned j = 1; j < 4; j++)
			outputs[noutput].values[j] = ctx->ac.f32_0;
		noutput++;
	}

	radv_llvm_export_vs(ctx, outputs, noutput, outinfo, export_clip_dists);

	free(outputs);
}
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index, max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32,
								   ctx->ac.wave_size, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    ac_glc | ac_slc, true);
			}
		}
	}
}
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct radv_shader_context *ctx)
{
	unsigned num_outputs = util_bitcount64(ctx->output_mask);

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(ctx->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct radv_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
static void
handle_ngg_outputs_post(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_build_if_state if_state;
	unsigned num_vertices = 3;
	LLVMValueRef tmp;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);

	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
	};

	/* TODO: streamout */

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->stage == MESA_SHADER_VERTEX &&
	    ctx->options->key.vs_common_out.export_prim_id) {
		/* TODO: streamout */

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			LLVMConstInt(ctx->ac.i32, 0, false);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ctx->abi.gs_prim_id,
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_nir_build_if(&if_state, ctx, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_nir_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	ac_nir_build_if(&if_state, ctx, is_es_thread);
	{
		struct radv_vs_output_info *outinfo =
			ctx->stage == MESA_SHADER_TESS_EVAL ? &ctx->shader_info->tes.outinfo : &ctx->shader_info->vs.outinfo;

		/* Exporting the primitive ID is handled below. */
		/* TODO: use the new VS export path */
		handle_vs_outputs_post(ctx, false,
				       ctx->options->key.vs_common_out.export_clip_dists,
				       outinfo);

		if (ctx->options->key.vs_common_out.export_prim_id) {
			unsigned param_count = outinfo->param_exports;
			LLVMValueRef values[4];

			if (ctx->stage == MESA_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->stage == MESA_SHADER_TESS_EVAL);
				values[0] = ctx->abi.tes_patch_id;
			}

			values[0] = ac_to_float(&ctx->ac, values[0]);
			for (unsigned j = 1; j < 4; j++)
				values[j] = ctx->ac.f32_0;

			radv_export_param(ctx, param_count, values, 0x1);

			outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
			outinfo->export_prim_id = true;
			outinfo->param_exports = param_count;
		}
	}
	ac_nir_build_endif(&if_state);
}
static void gfx10_ngg_gs_emit_prologue(struct radv_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMBasicBlockRef merge_block;
	LLVMValueRef cond;

	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
	LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
	merge_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");

	cond = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMBuildCondBr(ctx->ac.builder, cond, then_block, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, then_block);

	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
	LLVMBuildStore(builder, ctx->ac.i32_0, ptr);

	LLVMBuildBr(ctx->ac.builder, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, merge_block);

	ac_build_s_barrier(&ctx->ac);
}
static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}
}
3511 static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context
*ctx
)
3513 const unsigned verts_per_prim
= si_conv_gl_prim_to_vertices(ctx
->gs_output_prim
);
3514 LLVMBuilderRef builder
= ctx
->ac
.builder
;
3515 LLVMValueRef tmp
, tmp2
;
3517 ac_build_s_barrier(&ctx
->ac
);
3519 const LLVMValueRef tid
= get_thread_id_in_tg(ctx
);
3520 LLVMValueRef num_emit_threads
= ngg_get_prim_cnt(ctx
);
3522 /* TODO: streamout */
3526 /* Determine vertex liveness. */
3527 LLVMValueRef vertliveptr
= ac_build_alloca(&ctx
->ac
, ctx
->ac
.i1
, "vertexlive");
3529 tmp
= LLVMBuildICmp(builder
, LLVMIntULT
, tid
, num_emit_threads
, "");
3530 ac_build_ifcc(&ctx
->ac
, tmp
, 5120);
3532 for (unsigned i
= 0; i
< verts_per_prim
; ++i
) {
3533 const LLVMValueRef primidx
=
3534 LLVMBuildAdd(builder
, tid
,
3535 LLVMConstInt(ctx
->ac
.i32
, i
, false), "");
3538 tmp
= LLVMBuildICmp(builder
, LLVMIntULT
, primidx
, num_emit_threads
, "");
3539 ac_build_ifcc(&ctx
->ac
, tmp
, 5121 + i
);
3542 /* Load primitive liveness */
3543 tmp
= ngg_gs_vertex_ptr(ctx
, primidx
);
3544 LLVMValueRef gep_idx
[3] = {
3545 ctx
->ac
.i32_0
, /* implicit C-style array */
3546 ctx
->ac
.i32_1
, /* second value of struct */
3547 ctx
->ac
.i32_0
, /* stream 0 */
3549 tmp
= LLVMBuildGEP(builder
, tmp
, gep_idx
, 3, "");
3550 tmp
= LLVMBuildLoad(builder
, tmp
, "");
3551 const LLVMValueRef primlive
=
3552 LLVMBuildTrunc(builder
, tmp
, ctx
->ac
.i1
, "");
3554 tmp
= LLVMBuildLoad(builder
, vertliveptr
, "");
3555 tmp
= LLVMBuildOr(builder
, tmp
, primlive
, ""),
3556 LLVMBuildStore(builder
, tmp
, vertliveptr
);
3559 ac_build_endif(&ctx
->ac
, 5121 + i
);
3562 ac_build_endif(&ctx
->ac
, 5120);
3564 /* Inclusive scan addition across the current wave. */
3565 LLVMValueRef vertlive
= LLVMBuildLoad(builder
, vertliveptr
, "");
3566 struct ac_wg_scan vertlive_scan
= {};
3567 vertlive_scan
.op
= nir_op_iadd
;
3568 vertlive_scan
.enable_reduce
= true;
3569 vertlive_scan
.enable_exclusive
= true;
3570 vertlive_scan
.src
= vertlive
;
3571 vertlive_scan
.scratch
= ac_build_gep0(&ctx
->ac
, ctx
->gs_ngg_scratch
, ctx
->ac
.i32_0
);
3572 vertlive_scan
.waveidx
= get_wave_id_in_tg(ctx
);
3573 vertlive_scan
.numwaves
= get_tgsize(ctx
);
3574 vertlive_scan
.maxwaves
= 8;
3576 ac_build_wg_scan(&ctx
->ac
, &vertlive_scan
);
3578 /* Skip all exports (including index exports) when possible. At least on
3579 * early gfx10 revisions this is also to avoid hangs.
3581 LLVMValueRef have_exports
=
3582 LLVMBuildICmp(builder
, LLVMIntNE
, vertlive_scan
.result_reduce
, ctx
->ac
.i32_0
, "");
3584 LLVMBuildSelect(builder
, have_exports
, num_emit_threads
, ctx
->ac
.i32_0
, "");
3586 /* Allocate export space. Send this message as early as possible, to
3587 * hide the latency of the SQ <-> SPI roundtrip.
3589 * Note: We could consider compacting primitives for export as well.
3590 * PA processes 1 non-null prim / clock, but it fetches 4 DW of
3591 * prim data per clock and skips null primitives at no additional
3592 * cost. So compacting primitives can only be beneficial when
3593 * there are 4 or more contiguous null primitives in the export
3594 * (in the common case of single-dword prim exports).
3596 build_sendmsg_gs_alloc_req(ctx
, vertlive_scan
.result_reduce
, num_emit_threads
);
3598 /* Setup the reverse vertex compaction permutation. We re-use stream 1
3599 * of the primitive liveness flags, relying on the fact that each
3600 * threadgroup can have at most 256 threads. */
3601 ac_build_ifcc(&ctx
->ac
, vertlive
, 5130);
3603 tmp
= ngg_gs_vertex_ptr(ctx
, vertlive_scan
.result_exclusive
);
3604 LLVMValueRef gep_idx
[3] = {
3605 ctx
->ac
.i32_0
, /* implicit C-style array */
3606 ctx
->ac
.i32_1
, /* second value of struct */
3607 ctx
->ac
.i32_1
, /* stream 1 */
3609 tmp
= LLVMBuildGEP(builder
, tmp
, gep_idx
, 3, "");
3610 tmp2
= LLVMBuildTrunc(builder
, tid
, ctx
->ac
.i8
, "");
3611 LLVMBuildStore(builder
, tmp2
, tmp
);
3613 ac_build_endif(&ctx
->ac
, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct radv_vs_output_info *outinfo = &ctx->shader_info->vs.outinfo;
		bool export_view_index = ctx->options->key.has_multiview_view_index;
		struct radv_shader_output_values *outputs;
		unsigned noutput = 0;

		/* Allocate a temporary array for the output values. */
		unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_view_index;
		outputs = calloc(num_outputs, sizeof(outputs[0]));

		memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
		       sizeof(outinfo->vs_output_param_offset));
		outinfo->pos_exports = 0;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
			outinfo->writes_pointsize = true;
		}
		if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
			outinfo->writes_layer = true;
		}
		if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
			outinfo->writes_viewport_index = true;
		}

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			if (!(ctx->output_mask & (1ull << i)))
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

			outputs[noutput].usage_mask = ctx->shader_info->info.gs.output_usage_mask[i];
			int length = util_last_bit(outputs[noutput].usage_mask);

			for (unsigned j = 0; j < length; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					tmp = ac_to_integer(&ctx->ac, tmp);
					tmp = LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i16, "");
				}

				outputs[noutput].values[j] = ac_to_float(&ctx->ac, tmp);
			}

			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		/* Export ViewIndex. */
		if (export_view_index) {
			outinfo->writes_layer = true;

			outputs[noutput].slot_name = VARYING_SLOT_LAYER;
			outputs[noutput].slot_index = 0;
			outputs[noutput].usage_mask = 0x1;
			outputs[noutput].values[0] = ac_to_float(&ctx->ac, ctx->abi.view_index);
			for (unsigned j = 1; j < 4; j++)
				outputs[noutput].values[j] = ctx->ac.f32_0;

			noutput++;
		}

		radv_llvm_export_vs(ctx, outputs, noutput, outinfo,
				    ctx->options->key.vs_common_out.export_clip_dists);

		free(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
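
/* A sketch of the per-vertex LDS record that the gep_idx triples above and
 * below index into (inferred from those GEPs; the authoritative layout lives
 * in ngg_gs_vertex_ptr and friends):
 *
 *	struct {
 *		uint32_t values[gsvs vertex dwords]; // member 0: output data
 *		uint8_t primflag[4];                 // member 1: one flag per stream
 *	};
 *
 * primflag doubles as storage for the compaction permutation in stream 1
 * once primitive liveness is no longer needed.
 */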
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++, out_idx++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 <= ctx->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
}
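
/* For example, with triangle-strip output si_conv_gl_prim_to_vertices
 * returns 3, so the first two EmitVertex calls in a stream see curverts of
 * 0 and 1 and store primflag 0, while the third and every later vertex sees
 * curverts >= 2 and stores primflag 1: each such vertex completes one strip
 * primitive. This is exactly the liveness bit the NGG epilogue above scans
 * and compacts.
 */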
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		/* For isolines the two outer factors are stored in reversed
		 * order. */
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps + i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;
	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc, false);

	/* Store to offchip for TES to read - only if TES reads them. */
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, ac_glc, false);
		if (inner_comps) {
			param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ctx->oc_lds, 0, ac_glc, false);
		}
	}

	ac_nir_build_endif(&if_ctx);
}

static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}

static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
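
/* Note on the fence in the legacy GS epilogue below: on GFX10 the ring
 * writes presumably need to be made visible before GS_DONE is signalled,
 * hence the release fence. Treat this as an inference from the code rather
 * than documented hardware behavior.
 */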
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_epilogue_1(ctx);
		return;
	}

	if (ctx->ac.chip_class >= GFX10)
		LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}

static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}

static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls ||
		    ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
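
/* Background for the descriptor surgery in ac_setup_rings below (a summary
 * of how this code uses the fields, not a substitute for the register docs):
 * an AMD buffer descriptor is four dwords, where dword 1 holds the upper
 * base-address bits plus the stride field written via S_008F04_STRIDE(), and
 * dword 2 is num_records. The function rewrites these fields so each GSVS
 * stream sees a correctly offset and strided window of the ring.
 */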
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs_common_out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = ctx->ac.wave_size;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->info.gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->gs_max_out_vertices;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			/* Add the per-stream byte offset to the 64-bit base
			 * address in dwords 0-1 of the descriptor. */
			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * ctx->ac.wave_size;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}

static unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				gl_shader_stage stage,
				const struct nir_shader *nir)
{
	const unsigned backup_sizes[] = {chip_class >= GFX9 ? 128 : 64, 1, 1};
	return radv_get_max_workgroup_size(chip_class, stage, nir ? nir->info.cs.local_size : backup_sizes);
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for (int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}

/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
static void declare_esgs_ring(struct radv_shader_context *ctx)
{
	if (ctx->esgs_ring)
		return;

	assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

	ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
		ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
		"esgs_ring",
		AC_ADDR_SPACE_LDS);
	LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
	LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}

static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
				       struct nir_shader *const *shaders,
				       int shader_count,
				       struct radv_shader_variant_info *shader_info,
				       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;

	ctx.options = options;
	ctx.shader_info = shader_info;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, float_mode, options->wave_size);
	ctx.context = ctx.ac.context;

	radv_nir_shader_info_init(&shader_info->info);

	for (int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      radv_nir_get_max_workgroup_size(ctx.options->chip_class,
									      shaders[i]->info.stage,
									      shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(shaders[0]->info.stage) &&
		    options->key.vs_common_out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;
	ctx.abi.robust_buffer_access = options->robust_buffer_access;

	/* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
	 * we fallback to the old intrinsics for atomic buffer image operations
	 * and thus we need to apply the indexing workaround...
	 */
	ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if ((ctx.ac.family == CHIP_VEGA10 ||
	     ctx.ac.family == CHIP_RAVEN) &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);
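
	/* From here each NIR shader is translated in order. For merged
	 * shaders, merged_wave_info packs one byte per shader part, as used
	 * by the ac_unpack_param(..., 8 * i, 8) calls in this file: e.g. a
	 * GFX9 LS+HS pipeline with 24 LS and 8 HS threads would carry 24 in
	 * byte 0 and 8 in byte 1. This is a reading aid inferred from the
	 * unpack calls, not from hardware documentation.
	 */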
	for (int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			if (ctx.options->key.vs_common_out.as_ngg) {
				for (unsigned i = 0; i < 4; ++i) {
					ctx.gs_curprim_verts[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
					ctx.gs_generated_prims[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
				}

				/* TODO: streamout */

				LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, 8);
				ctx.gs_ngg_scratch =
					LLVMAddGlobalInAddressSpace(ctx.ac.module,
								    ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
				LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(ai32));
				LLVMSetAlignment(ctx.gs_ngg_scratch, 4);

				ctx.gs_ngg_emit = LLVMBuildIntToPtr(ctx.ac.builder, ctx.ac.i32_0,
								    LLVMPointerType(LLVMArrayType(ctx.ac.i32, 0), AC_ADDR_SPACE_LDS),
								    "ngg_emit");
			}

			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.gs_output_prim = shaders[i]->info.gs.output_primitive;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (shaders[i]->info.stage == MESA_SHADER_VERTEX &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    ctx.options->key.vs_common_out.export_prim_id) {
			declare_esgs_ring(&ctx);
		}

		bool nested_barrier = false;

		if (i) {
			if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			    ctx.options->key.vs_common_out.as_ngg) {
				gfx10_ngg_gs_emit_prologue(&ctx);
				nested_barrier = false;
			} else {
				nested_barrier = true;
			}
		}

		if (nested_barrier) {
			/* Execute a barrier before the second shader in
			 * a merged shader.
			 *
			 * Execute the barrier inside the conditional block,
			 * so that empty waves can jump directly to s_endpgm,
			 * which will also signal the barrier.
			 *
			 * This is possible in gfx9, because an empty wave
			 * for the second shader does not participate in
			 * the epilogue. With NGG, empty waves may still
			 * be required to export data (e.g. GS output vertices),
			 * so we cannot let them exit early.
			 *
			 * If the shader is TCS and the TCS epilog is present
			 * and contains a barrier, it will wait there and then
			 * reach s_endpgm.
			 */
			ac_emit_barrier(&ctx.ac, ctx.stage);
		}

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					   shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
						 shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);
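
		/* The shader body below runs only on the threads that carry
		 * work for stage i; the generated control flow is, in
		 * pseudo-IR:
		 *
		 *   if (thread_id < wave_count_for_stage_i) { ...body... }
		 *   merge:
		 *
		 * Threads beyond the count (possible with merged stages on
		 * GFX9+) skip straight to the merge block.
		 */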
		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			prepare_interp_optimize(&ctx, shaders[i]);
		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			   ctx.options->key.vs_common_out.as_ngg) {
			gfx10_ngg_gs_emit_epilogue_2(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir) {
		fprintf(stderr, "%s LLVM IR:\n\n",
			radv_get_shader_name(shader_info,
					     shaders[shader_count - 1]->info.stage));
		ac_dump_module(ctx.ac.module);
		fprintf(stderr, "\n");
	}

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}

static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1; /* non-zero signals failure to the caller */
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

static unsigned radv_llvm_compile(LLVMModuleRef M,
				  char **pelf_buffer, size_t *pelf_size,
				  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;
	return retval;
}

static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct radv_shader_binary **rbinary,
				   struct radv_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   const char *name,
				   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;

	if (options->dump_shader) {
		fprintf(stderr, "%s LLVM IR:\n\n", name);
		ac_dump_module(llvm_module);
		fprintf(stderr, "\n");
	}

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	free(llvm_ir_string);
	free(elf_buffer);
}

static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		shader_info->fs.post_depth_coverage = nir->info.fs.post_depth_coverage;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.vs_common_out.as_es;
		shader_info->tes.export_prim_id = options->key.vs_common_out.export_prim_id;
		shader_info->is_ngg = options->key.vs_common_out.as_ngg;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs_common_out.as_es;
		shader_info->vs.as_ls = options->key.vs_common_out.as_ls;
		shader_info->vs.export_prim_id = options->key.vs_common_out.export_prim_id;
		shader_info->is_ngg = options->key.vs_common_out.as_ngg;
		break;
	default:
		break;
	}
}

void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct radv_shader_binary **rbinary,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary, shader_info,
			       nir[nir_count - 1]->info.stage,
			       radv_get_shader_name(shader_info,
						    nir[nir_count - 1]->info.stage),
			       options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class >= GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
	shader_info->info.wave_size = options->wave_size;
}
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->info.so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset = 0;

		if (!num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->info.gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->gs_max_out_vertices * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->info.so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, true,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}

void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct radv_shader_binary **rbinary,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};

	ctx.options = options;
	ctx.shader_info = shader_info;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, float_mode, 64);
	ctx.context = ctx.ac.context;

	ctx.is_gs_copy_shader = true;
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary, shader_info,
			       MESA_SHADER_VERTEX, "GS Copy Shader", options);
	(*rbinary)->is_gs_copy_shader = true;
}