/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"
#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_info *shader_info;
	const struct nir_shader *shader;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	/* HW GS */
	/* On gfx10:
	 *  - bits 0..10: ordered_wave_id
	 *  - bits 12..20: number of vertices in group
	 *  - bits 22..30: number of primitives in group
	 */
	LLVMValueRef gs_tg_info;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef gs_curprim_verts[4];
	LLVMValueRef gs_generated_prims[4];
	LLVMValueRef gs_ngg_emit;
	LLVMValueRef gs_ngg_scratch;

	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;

	LLVMValueRef vertexptr; /* GFX10 only */
};

struct radv_shader_output_values {
	LLVMValueRef values[4];
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
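
/* Worked example (illustrative, not from the original source): with 3 input
 * and 4 output control points, 4 per-vertex outputs, 1 patch output and 16
 * TCS inputs on a >= GFX7 part that is not STONEY:
 *   num_patches       = 64 / MAX2(3, 4) * 4 = 64
 *   input_patch_size  = 3 * (16 * 16)       = 768 bytes
 *   output_patch_size = 4 * (4 * 16) + 16   = 272 bytes
 *   LDS clamp:  MIN2(64, 65536 / (768 + 272)) = 63
 *   perf clamp: MIN2(63, 40)                  = 40 patches per threadgroup
 * (the offchip clamp depends on tess_offchip_block_dw_size).
 */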
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
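
/* Continuing the illustrative numbers above: with 40 patches,
 * lds_size = 768 * 40 + 272 * 40 = 41600 bytes of LDS.
 */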
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0            = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0  = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2            = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2  = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->needs_multiview_view_index ||
		    (!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->max_push_constant_used -
		 ctx->shader_info->min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->loads_push_constants = false;
	}

	ctx->shader_info->base_inline_push_consts =
		ctx->shader_info->min_push_constant_used / 4;
}
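
/* Worked example (illustrative, not from the original source): if a shader
 * only reads push constants at byte offsets 4..19, then
 * min_push_constant_used = 4, max_push_constant_used = 20, and
 * num_push_consts = (20 - 4) / 4 = 4 dwords are inlined into user SGPRs,
 * with base_inline_push_consts = 4 / 4 = 1.
 */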
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->loads_push_constants)
		user_sgpr_count++;

	if (ctx->shader_info->so.num_outputs)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->base_inline_push_consts;

	if (ctx->shader_info->so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs_common_out.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				if (ctx->options->key.vs_common_out.as_ngg) {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				} else {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				}
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		}
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	if (ctx->options->use_ngg_streamout)
		return;

	/* Streamout SGPRs. */
	if (ctx->shader_info->so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);

	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			previous_stage = stage;
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs_common_out.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			if (ctx->options->key.vs_common_out.as_ngg) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs_tg_info);
			} else {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs2vs_offset);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else {
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
	}

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (ctx->ac.chip_class >= GFX10) {
			desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				     S_008F0C_OOB_SELECT(3) |
				     S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
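
/* Worked example (illustrative, not from the original source): with 4 TCS
 * outputs, 4 vertices per patch and 40 patches, the per-patch attributes
 * start at (4 * 16) * 4 * 40 = 10240 bytes into the offchip buffer.
 */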
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->shader->info.patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->shader->info.outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), ac_glc, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), ac_glc, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;
		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
radv_emit_fetch_64bit(struct radv_shader_context *ctx,
		      LLVMTypeRef type, LLVMValueRef a, LLVMValueRef b)
{
	LLVMValueRef values[2] = {
		ac_to_integer(&ctx->ac, a),
		ac_to_integer(&ctx->ac, b),
	};
	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
	return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);

			if (ac_get_type_size(type) == 8) {
				dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
						       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index + 1, 0), "");
				LLVMValueRef tmp = ac_lds_load(&ctx->ac, dw_addr);

				value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
			}
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);

			if (ac_get_type_size(type) == 8) {
				soffset = LLVMConstInt(ctx->ac.i32,
						       (param * 4 + i + const_index + 1) * 256,
						       false);

				LLVMValueRef tmp =
					ac_build_buffer_load(&ctx->ac,
							     ctx->esgs_ring, 1,
							     ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc, true, false);

				value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
			}
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
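
/* Illustrative note (not in the original): the sample-positions buffer packs
 * the 1x, 2x, 4x and 8x patterns back to back, so each offset is the running
 * total of the preceding pattern sizes: 1 (after 1x), 1 + 2 = 3, 1 + 2 + 4 = 7.
 */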
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
load_sample_mask_in(struct ac_shader_abi
*abi
)
1727 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1728 uint8_t log2_ps_iter_samples
;
1730 if (ctx
->shader_info
->ps
.force_persample
) {
1731 log2_ps_iter_samples
=
1732 util_logbase2(ctx
->options
->key
.fs
.num_samples
);
1734 log2_ps_iter_samples
= ctx
->options
->key
.fs
.log2_ps_iter_samples
;
1737 /* The bit pattern matches that used by fixed function fragment
1739 static const uint16_t ps_iter_masks
[] = {
1740 0xffff, /* not used */
1746 assert(log2_ps_iter_samples
< ARRAY_SIZE(ps_iter_masks
));
1748 uint32_t ps_iter_mask
= ps_iter_masks
[log2_ps_iter_samples
];
1750 LLVMValueRef result
, sample_id
;
1751 sample_id
= ac_unpack_param(&ctx
->ac
, abi
->ancillary
, 8, 4);
1752 sample_id
= LLVMBuildShl(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, ps_iter_mask
, false), sample_id
, "");
1753 result
= LLVMBuildAnd(ctx
->ac
.builder
, sample_id
, abi
->sample_coverage
, "");
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs);

static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
		return;
	}

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, don't emit any more: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");

	bool use_kill = !ctx->shader_info->gs.writes_memory;
	if (use_kill)
		ac_build_kill_if_false(&ctx->ac, can_emit);
	else
		ac_build_ifcc(&ctx->ac, can_emit, 6505);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->shader->info.gs.vertices_out, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    ac_glc | ac_slc, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);

	if (!use_kill)
		ac_build_endif(&ctx->ac, 6505);
}

static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
		return;
	}

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}
static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}
static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
					  unsigned descriptor_set,
					  unsigned base_index,
					  unsigned constant_index,
					  LLVMValueRef index,
					  enum ac_descriptor_type desc_type,
					  bool image, bool write,
					  bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}
		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);

		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}
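/* Worked example for the SNORM path above: after the shl by 7 the two
 * exponent LSBs of 0.0/0.333/0.666/1.0 sit in bits 30..31, and the ashr by
 * 30 sign-extends them to 0, 1, -2, -1. SIToFP plus the clamp against -1.0
 * then yields 0.0, 1.0, -1.0, -1.0, i.e. the 2-bit SNORM decoding.
 */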
static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		break;
	}

	return 4;
}
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}
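/* E.g. a 2-channel fetch feeding a shader that reads vec4 is expanded here
 * to (x, y, 0, 1): missing middle channels default to zero and a missing
 * .w defaults to one, matching the usual vertex-fetch defaults.
 */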
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index,
						    buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format, 0, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}
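/* Note on the attrib_offset normalization in handle_vs_input_decl above:
 * with attrib_stride == 16 and attrib_offset == 36, the constant 36/16 == 2
 * is folded into buffer_index and attrib_offset becomes 36 % 16 == 4,
 * keeping the immediate offset smaller than the stride as the typed buffer
 * fetch expects.
 */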
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}
static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->abi.persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->abi.persp_center, ctx->abi.persp_centroid, "");
		ctx->abi.linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->abi.linear_center, ctx->abi.linear_centroid, "");
	}
}
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			if (ctx->ac.chip_class >= GFX10) {
				args->enabled_channels = 0x3;
				args->out[0] = values[0];
				args->out[1] = values[3];
			} else {
				args->enabled_channels = 0x9;
				args->out[0] = values[0];
				args->out[3] = values[3];
			}
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16/u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}
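/* In the compressed cases above (FP16/UNORM16/SNORM16/UINT16/SINT16) the
 * four channels are packed pairwise into out[0] and out[1]; with COMPR set
 * the export enable bits are read per 16-bit pair, which is why
 * enabled_channels is 0x5 rather than 0xf.
 */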
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}
static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output = ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output,
			struct radv_shader_output_values *shader_out)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac, shader_out->values[start + i]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc, false);
}
static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->so.num_outputs; i++) {
			struct radv_shader_output_values shader_out = {};
			struct radv_stream_output *output =
				&ctx->shader_info->so.outputs[i];

			if (stream != output->stream)
				continue;

			for (int j = 0; j < 4; j++) {
				shader_out.values[j] =
					radv_load_output(ctx, output->location, j);
			}

			radv_emit_stream_output(ctx, so_buffers, so_write_offset,
						output, &shader_out);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}
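/* Worked example of the ByteOffset formula in radv_emit_streamout above:
 * streamout_offset[0] == 8, a 16-byte stride and thread_id == 3 at
 * streamout_write_index == 100 give 8*4 + (100 + 3)*16 == 1680 bytes, plus
 * the per-output attrib_offset.
 */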
static void
radv_build_param_exports(struct radv_shader_context *ctx,
			 struct radv_shader_output_values *outputs,
			 unsigned noutput,
			 struct radv_vs_output_info *outinfo,
			 bool export_clip_dists)
{
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned slot_name = outputs[i].slot_name;
		unsigned usage_mask = outputs[i].usage_mask;

		if (slot_name != VARYING_SLOT_LAYER &&
		    slot_name != VARYING_SLOT_PRIMITIVE_ID &&
		    slot_name != VARYING_SLOT_CLIP_DIST0 &&
		    slot_name != VARYING_SLOT_CLIP_DIST1 &&
		    slot_name < VARYING_SLOT_VAR0)
			continue;

		if ((slot_name == VARYING_SLOT_CLIP_DIST0 ||
		     slot_name == VARYING_SLOT_CLIP_DIST1) && !export_clip_dists)
			continue;

		radv_export_param(ctx, param_count, outputs[i].values, usage_mask);

		assert(i < ARRAY_SIZE(outinfo->vs_output_param_offset));
		outinfo->vs_output_param_offset[slot_name] = param_count++;
	}

	outinfo->param_exports = param_count;
}
/* Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 */
static void
radv_llvm_export_vs(struct radv_shader_context *ctx,
		    struct radv_shader_output_values *outputs,
		    unsigned noutput,
		    struct radv_vs_output_info *outinfo,
		    bool export_clip_dists)
{
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
	struct ac_export_args pos_args[4] = {};
	unsigned pos_idx, index;
	int i;

	/* Build position exports */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].slot_name) {
		case VARYING_SLOT_POS:
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case VARYING_SLOT_PSIZ:
			psize_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_VIEWPORT:
			viewport_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
			index = 2 + outputs[i].slot_index;
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS + index,
						 &pos_args[index]);
			break;
		default:
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			outinfo->pos_exports++;
	}

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.family == CHIP_NAVI10 ||
	    ctx->ac.family == CHIP_NAVI12 ||
	    ctx->ac.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == outinfo->pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports */
	radv_build_param_exports(ctx, outputs, noutput, outinfo, export_clip_dists);
}
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	struct radv_shader_output_values *outputs;
	unsigned noutput = 0;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for (unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));
	outinfo->pos_exports = 0;

	if (!ctx->options->use_ngg_streamout &&
	    ctx->shader_info->so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	/* Allocate a temporary array for the output values. */
	unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_prim_id;
	outputs = malloc(num_outputs * sizeof(outputs[0]));

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		outputs[noutput].slot_name = i;
		outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			outputs[noutput].usage_mask =
				ctx->shader_info->vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			outputs[noutput].usage_mask =
				ctx->shader_info->tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			outputs[noutput].usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
		}

		for (unsigned j = 0; j < 4; j++) {
			outputs[noutput].values[j] =
				ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
		}

		noutput++;
	}

	/* Export PrimitiveID. */
	if (export_prim_id) {
		outputs[noutput].slot_name = VARYING_SLOT_PRIMITIVE_ID;
		outputs[noutput].slot_index = 0;
		outputs[noutput].usage_mask = 0x1;
		outputs[noutput].values[0] = ctx->vs_prim_id;
		for (unsigned j = 1; j < 4; j++)
			outputs[noutput].values[j] = ctx->ac.f32_0;
		noutput++;
	}

	radv_llvm_export_vs(ctx, outputs, noutput, outinfo, export_clip_dists);

	free(outputs);
}
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	LLVMValueRef lds_base = NULL;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32,
								   ctx->ac.wave_size, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    ac_glc | ac_slc, true);
			}
		}
	}
}
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_ordered_id(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    ctx->ac.i32_0,
			    LLVMConstInt(ctx->ac.i32, 11, false),
			    false);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct radv_shader_context *ctx)
{
	unsigned num_outputs = util_bitcount64(ctx->output_mask);

	if (ctx->options->key.has_multiview_view_index)
		num_outputs++;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(ctx->shader->info.gs.vertices_out) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
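/* Swizzle example: with gs.vertices_out == 4, write_stride_2exp == 2, so a
 * vertex index in row r (vertexidx / 32) is XORed with (r & 3); lanes that
 * emit in lockstep then land in different LDS banks instead of striding by
 * a power of two.
 */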
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
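/* E.g. vtx_cnt == 5 and prim_cnt == 3 produce the payload
 * (3 << 12) | 5 == 0x3005, matching the bit layout in the comment above.
 */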
struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef swap;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct radv_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef vertices[3];
	LLVMValueRef odd, even;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 9, false), "");
		vertices[i] = LLVMBuildOr(builder, prim->index[i], tmp, "");
	}

	switch (prim->num_vertices) {
	case 1:
		args.out[0] = LLVMBuildOr(builder, args.out[0], vertices[0], "");
		break;
	case 2:
		tmp = LLVMBuildShl(builder, vertices[1],
				   LLVMConstInt(ctx->ac.i32, 10, false), "");
		tmp = LLVMBuildOr(builder, args.out[0], tmp, "");
		args.out[0] = LLVMBuildOr(builder, tmp, vertices[0], "");
		break;
	case 3:
		/* Swap vertices if needed to follow drawing order. */
		tmp = LLVMBuildShl(builder, vertices[2],
				   LLVMConstInt(ctx->ac.i32, 20, false), "");
		even = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildShl(builder, vertices[1],
				   LLVMConstInt(ctx->ac.i32, 10, false), "");
		even = LLVMBuildOr(builder, even, tmp, "");
		even = LLVMBuildOr(builder, even, vertices[0], "");

		tmp = LLVMBuildShl(builder, vertices[1],
				   LLVMConstInt(ctx->ac.i32, 20, false), "");
		odd = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildShl(builder, vertices[2],
				   LLVMConstInt(ctx->ac.i32, 10, false), "");
		odd = LLVMBuildOr(builder, odd, tmp, "");
		odd = LLVMBuildOr(builder, odd, vertices[0], "");

		args.out[0] = LLVMBuildSelect(builder, prim->swap, odd, even, "");
		break;
	default:
		unreachable("invalid number of vertices");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
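/* Resulting primitive descriptor: bit 31 is the isnull flag, vertex indices
 * occupy bits 0..8, 10..18 and 20..28, and bits 9/19/29 hold the matching
 * edgeflags; for triangles, `swap` selects the operand order that keeps the
 * winding correct.
 */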
static struct radv_stream_output *
radv_get_stream_output_by_loc(struct radv_streamout_info *so, unsigned location)
{
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->outputs[i].location == location)
			return &so->outputs[i];
	}

	return NULL;
}
static void build_streamout_vertex(struct radv_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct radv_streamout_info *so = &ctx->shader_info->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->ac.i32, so->strides[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		struct radv_shader_output_values outputs[AC_LLVM_MAX_OUTPUTS];
		unsigned noutput = 0;
		unsigned out_idx = 0;

		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
			uint8_t output_stream =
				ctx->shader_info->gs.output_streams[i];

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
			outputs[noutput].usage_mask = output_usage_mask;

			int length = util_last_bit(output_usage_mask);

			for (unsigned j = 0; j < length; j++, out_idx++) {
				if (!(output_usage_mask & (1 << j)))
					continue;

				tmp = ac_build_gep0(&ctx->ac, vertexptr,
						    LLVMConstInt(ctx->ac.i32, out_idx, false));
				outputs[noutput].values[j] = LLVMBuildLoad(builder, tmp, "");
			}

			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		for (unsigned i = 0; i < noutput; i++) {
			struct radv_stream_output *output =
				radv_get_stream_output_by_loc(so, outputs[i].slot_name);

			if (!output ||
			    output->stream != stream)
				continue;

			struct radv_shader_output_values out = {};

			for (unsigned j = 0; j < 4; j++) {
				out.values[j] = outputs[i].values[j];
			}

			radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
		}
	} else {
		for (unsigned i = 0; i < so->num_outputs; ++i) {
			struct radv_stream_output *output =
				&ctx->shader_info->so.outputs[i];

			if (stream != output->stream)
				continue;

			struct radv_shader_output_values out = {};

			for (unsigned comp = 0; comp < 4; comp++) {
				if (!(output->component_mask & (1 << comp)))
					continue;

				tmp = ac_build_gep0(&ctx->ac, vertexptr,
						    LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
				out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			}

			radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
		}
	}
}
struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};
/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:7].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct radv_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct radv_streamout_info *so = &ctx->shader_info->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = ctx->streamout_buffers;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef cond, tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_function,
					     "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->outputs[i].buffer;
		unsigned stream = so->outputs[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->strides[buffer]);

		LLVMValueRef stride_for_buffer =
			LLVMConstInt(ctx->ac.i32, so->strides[buffer], false);
		prim_stride_dw[buffer] =
			LLVMBuildMul(builder, stride_for_buffer,
				     nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->ac.i32, buffer, false));

		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, buffer, false);
		so_buffer[buffer] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
							  offset);
	}

	cond = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, cond, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");

		cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, cond, 5210);
		{
			/* Fetch the number of generated primitives and store
			 * it in GDS for later use.
			 */
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0,
							 ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!ctx->shader_info->gs.num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
						    swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->ac.i32_0, // ordering
				ctx->ac.i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->ac.i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			/* Compute the streamout buffer size in DWORD. */
			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					      LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					      i32_2, "");

			/* Load the streamout buffer offset from GDS. */
			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->ac.i32, buffer, false));

			/* Compute the remaining size to emit. */
			LLVMValueRef remaining_dw =
				LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, remaining_dw,
					    prim_stride_dw[buffer], "");

			cond = LLVMBuildICmp(builder, LLVMIntULT,
					     bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, cond,
							   ctx->ac.i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->ac.i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!ctx->shader_info->gs.num_stream_output_components[stream])
				continue;

			/* Load the number of generated primitives from GDS and
			 * determine that number for the given stream.
			 */
			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->ac.i32, stream, false));

			/* Compute the number of emitted primitives. */
			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			/* Store the number of emitted primitives for that
			 * stream.
			 */
			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->ac.i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			cond = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, cond, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		/* Store the number of emitted primitives to LDS for later use. */
		cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, cond, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!ctx->shader_info->gs.num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (ctx->shader_info->gs.num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!ctx->shader_info->gs.num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		cond = LLVMBuildICmp(builder, LLVMIntULT,
				     primemit_scan[stream].result_exclusive,
				     nggso->emit[stream], "");
		cond = LLVMBuildAnd(builder, cond, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, cond, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				cond = LLVMBuildICmp(builder, LLVMIntULT,
						     LLVMConstInt(ctx->ac.i32, i, false),
						     nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, cond, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}
static unsigned ngg_nogs_vertex_size(struct radv_shader_context *ctx)
{
	unsigned lds_vertex_size = 0;

	if (ctx->shader_info->so.num_outputs)
		lds_vertex_size = 4 * ctx->shader_info->so.num_outputs + 1;

	return lds_vertex_size;
}
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct radv_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
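/* Example of the sizing above: with so.num_outputs == 2, each vertex
 * occupies 4 * 2 + 1 = 9 dwords. Keeping the stride odd means consecutive
 * vertices start in different LDS banks (banks repeat every 32 dwords on
 * GCN-class hardware), so SoA-style accesses from consecutive threads are
 * less likely to collide. This illustrates the intent only; the exact bank
 * count depends on the chip.
 */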
static void
handle_ngg_outputs_post_1(struct radv_shader_context *ctx)
{
	struct radv_streamout_info *so = &ctx->shader_info->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef vertex_ptr = NULL;
	LLVMValueRef tmp, tmp2;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);

	if (!ctx->shader_info->so.num_outputs)
		return;

	vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		struct radv_stream_output *output =
			&ctx->shader_info->so.outputs[i];

		unsigned loc = output->location;

		for (unsigned comp = 0; comp < 4; comp++) {
			if (!(output->component_mask & (1 << comp)))
				continue;

			tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
					    LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
			tmp2 = LLVMBuildLoad(builder,
					     ctx->abi.outputs[4 * loc + comp], "");
			tmp2 = ac_to_integer(&ctx->ac, tmp2);
			LLVMBuildStore(builder, tmp2, tmp);
		}
	}
}
static void
handle_ngg_outputs_post_2(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);

	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->stage == MESA_SHADER_VERTEX) {
		LLVMValueRef outprim_val =
			LLVMConstInt(ctx->ac.i32,
				     ctx->options->key.vs.outprim, false);
		num_vertices_val = LLVMBuildAdd(builder, outprim_val,
						ctx->ac.i32_1, "");
		num_vertices = 3; /* TODO: optimize for points & lines */
	} else {
		assert(ctx->stage == MESA_SHADER_TESS_EVAL);

		if (ctx->shader->info.tess.point_mode)
			num_vertices = 1;
		else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->ac.i32, num_vertices, false);
	}

	/* Streamout */
	if (ctx->shader_info->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->stage == MESA_SHADER_VERTEX &&
	    ctx->options->key.vs_common_out.export_prim_id) {
		if (ctx->shader_info->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			LLVMConstInt(ctx->ac.i32, 0, false);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ctx->abi.gs_prim_id,
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */

	/* Export primitive data to the index buffer. Format is:
	 *  - bits 0..8: index 0
	 *  - bit 9: edge flag 0
	 *  - bits 10..18: index 1
	 *  - bit 19: edge flag 1
	 *  - bits 20..28: index 2
	 *  - bit 29: edge flag 2
	 *  - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
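	/* Illustration of the packing described above (hypothetical values,
	 * not code in this file): a real triangle with indices 5, 6, 7 and no
	 * edge flags packs as
	 *
	 *   (5 << 0) | (6 << 10) | (7 << 20) = 0x00701805
	 *
	 * with bits 9/19/29 clear (no edge flags) and bit 31 clear (not null).
	 */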
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		prim.swap = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		struct radv_vs_output_info *outinfo =
			ctx->stage == MESA_SHADER_TESS_EVAL ?
			&ctx->shader_info->tes.outinfo : &ctx->shader_info->vs.outinfo;

		/* Exporting the primitive ID is handled below. */
		/* TODO: use the new VS export path */
		handle_vs_outputs_post(ctx, false,
				       ctx->options->key.vs_common_out.export_clip_dists,
				       outinfo);

		if (ctx->options->key.vs_common_out.export_prim_id) {
			unsigned param_count = outinfo->param_exports;
			LLVMValueRef values[4];

			if (ctx->stage == MESA_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->stage == MESA_SHADER_TESS_EVAL);
				values[0] = ctx->abi.tes_patch_id;
			}

			values[0] = ac_to_float(&ctx->ac, values[0]);
			for (unsigned j = 1; j < 4; j++)
				values[j] = ctx->ac.f32_0;

			radv_export_param(ctx, param_count, values, 0x1);

			outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
			outinfo->param_exports = param_count;
		}
	}
	ac_build_endif(&ctx->ac, 6002);
}
static void gfx10_ngg_gs_emit_prologue(struct radv_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMBasicBlockRef merge_block;
	LLVMValueRef cond;

	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
	LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
	merge_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");

	cond = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMBuildCondBr(ctx->ac.builder, cond, then_block, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, then_block);

	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
	LLVMBuildStore(builder, ctx->ac.i32_0, ptr);

	LLVMBuildBr(ctx->ac.builder, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, merge_block);

	ac_build_s_barrier(&ctx->ac);
}
static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->shader_info->gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->shader_info->gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->ac.i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}
}
static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
{
	const unsigned verts_per_prim = si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (ctx->shader_info->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!ctx->shader_info->gs.num_stream_output_components[stream])
				continue;

			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				LLVMConstInt(ctx->ac.i32, stream, false),
			};
			tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);
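	/* Worked example of the compaction permutation set up above
	 * (hypothetical values): with vertlive = [1, 0, 1, 1] across four
	 * threads, the exclusive scan gives [0, 1, 1, 2] and result_reduce is
	 * 3. Threads 0, 2 and 3 store their tid into stream-1 flag bytes 0, 1
	 * and 2, so after the barrier the n-th export thread can look up which
	 * original thread produced the n-th live vertex.
	 */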
	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");
		prim.swap = LLVMBuildICmp(builder, LLVMIntEQ,
					  LLVMBuildAnd(builder, tid, LLVMConstInt(ctx->ac.i32, 1, false), ""),
					  LLVMConstInt(ctx->ac.i32, 1, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
						     LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct radv_vs_output_info *outinfo = &ctx->shader_info->vs.outinfo;
		bool export_view_index = ctx->options->key.has_multiview_view_index;
		struct radv_shader_output_values *outputs;
		unsigned noutput = 0;

		/* Allocate a temporary array for the output values. */
		unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_view_index;
		outputs = calloc(num_outputs, sizeof(outputs[0]));

		memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
		       sizeof(outinfo->vs_output_param_offset));
		outinfo->pos_exports = 0;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)))
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
			outputs[noutput].usage_mask = output_usage_mask;

			for (unsigned j = 0; j < length; j++, out_idx++) {
				if (!(output_usage_mask & (1 << j)))
					continue;

				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					tmp = ac_to_integer(&ctx->ac, tmp);
					tmp = LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i16, "");
				}

				outputs[noutput].values[j] = ac_to_float(&ctx->ac, tmp);
			}

			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		/* Export ViewIndex. */
		if (export_view_index) {
			outputs[noutput].slot_name = VARYING_SLOT_LAYER;
			outputs[noutput].slot_index = 0;
			outputs[noutput].usage_mask = 0x1;
			outputs[noutput].values[0] = ac_to_float(&ctx->ac, ctx->abi.view_index);
			for (unsigned j = 1; j < 4; j++)
				outputs[noutput].values[j] = ctx->ac.f32_0;
			noutput++;
		}

		radv_llvm_export_vs(ctx, outputs, noutput, outinfo,
				    ctx->options->key.vs_common_out.export_clip_dists);
		free(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
	ac_build_ifcc(&ctx->ac, can_emit, 9001);

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++, out_idx++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 <= ctx->shader_info->gs.gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	ac_build_endif(&ctx->ac, 9001);
}
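/* Example of the primflag bookkeeping above, assuming triangle-strip output
 * (verts_per_prim == 3): the first two emitted vertices see curverts of 0 and
 * 1 and store primflag 0; every vertex from the third onwards satisfies
 * curverts >= 2 and stores primflag 1, marking it as the vertex that
 * completes a primitive. gs_generated_prims counts exactly those completing
 * vertices.
 */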
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}
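	/* For reference, the layout implied by the cases above: one dword per
	 * tess factor, outer factors first, so each patch writes
	 * outer_comps + inner_comps == stride dwords of tess factors.
	 */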
	ac_build_ifcc(&ctx->ac,
		      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				    invocation_id, ctx->ac.i32_0, ""), 6503);

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	/* Isolines: the two outer factors are consumed in reversed order. */
	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps + i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_build_ifcc(&ctx->ac,
			      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					    rel_patch_id, ctx->ac.i32_0, ""), 6504);

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc, false);
		tf_offset += 4;

		ac_build_endif(&ctx->ac, 6504);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, ac_glc, false);
		if (inner_comps) {
			param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ctx->oc_lds, 0, ac_glc, false);
		}
	}

	ac_build_endif(&ctx->ac, 6503);
}
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}
static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->ps.writes_z &&
	    !ctx->shader_info->ps.writes_stencil &&
	    !ctx->shader_info->ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_epilogue_1(ctx);
		return;
	}

	if (ctx->ac.chip_class >= GFX10)
		LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			handle_ngg_outputs_post_1(ctx);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			handle_ngg_outputs_post_1(ctx);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls ||
		    ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs_common_out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = ctx->ac.wave_size;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->shader->info.gs.vertices_out;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * ctx->ac.wave_size;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
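/* Example of the descriptor math above (hypothetical shader): one stored
 * component on a stream with vertices_out == 4 gives stride = 4 * 1 * 4 = 16
 * bytes, each stream's window begins stride * wave_size bytes after the
 * previous one, and the stride written into dword 1 of the descriptor makes
 * the hardware advance 16 bytes per thread index.
 */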
unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				gl_shader_stage stage,
				const struct nir_shader *nir)
{
	const unsigned backup_sizes[] = {chip_class >= GFX9 ? 128 : 64, 1, 1};
	unsigned sizes[3];
	for (unsigned i = 0; i < 3; i++)
		sizes[i] = nir ? nir->info.cs.local_size[i] : backup_sizes[i];
	return radv_get_max_workgroup_size(chip_class, stage, sizes);
}
/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}
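/* The loop above relies on the packed input form: each 32-bit
 * gs_vtx_offset[2k] carries two 16-bit vertex offsets (vertex 2k in the low
 * half, vertex 2k+1 in the high half). Iterating downwards from 5 unpacks
 * all six values without overwriting a packed source before its high half
 * has been read.
 */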
/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
static void declare_esgs_ring(struct radv_shader_context *ctx)
{
	if (ctx->esgs_ring)
		return;

	assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

	ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
		ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
		"esgs_ring",
		AC_ADDR_SPACE_LDS);
	LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
	LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                       struct nir_shader *const *shaders,
                                       int shader_count,
                                       struct radv_shader_info *shader_info,
                                       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;
	ctx.options = options;
	ctx.shader_info = shader_info;

	enum ac_float_mode float_mode = AC_FLOAT_MODE_DEFAULT;

	if (shader_info->float_controls_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) {
		float_mode = AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO;
	}

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, float_mode, shader_info->wave_size, 64);
	ctx.context = ctx.ac.context;

	for (i = 0; i < MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      radv_nir_get_max_workgroup_size(ctx.options->chip_class,
									      shaders[i]->info.stage,
									      shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(shaders[0]->info.stage) &&
		    options->key.vs_common_out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.robust_buffer_access = options->robust_buffer_access;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if (options->has_ls_vgpr_init_bug &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	if (is_ngg) {
		/* Declare scratch space base for streamout and vertex
		 * compaction. Whether space is actually allocated is
		 * determined during linking / PM4 creation.
		 *
		 * Add an extra dword per vertex to ensure an odd stride, which
		 * avoids bank conflicts for SoA accesses.
		 */
		declare_esgs_ring(&ctx);

		/* This is really only needed when streamout and / or vertex
		 * compaction is enabled.
		 */
		LLVMTypeRef asi32 = LLVMArrayType(ctx.ac.i32, 8);
		ctx.gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx.ac.module,
								 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
		LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(asi32));
		LLVMSetAlignment(ctx.gs_ngg_scratch, 4);
	}

	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.shader = shaders[i];
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			if (ctx.options->key.vs_common_out.as_ngg) {
				for (unsigned i = 0; i < 4; ++i) {
					ctx.gs_curprim_verts[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
					ctx.gs_generated_prims[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
				}

				unsigned scratch_size = 8;
				if (ctx.shader_info->so.num_outputs)
					scratch_size = 44;

				LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, scratch_size);
				ctx.gs_ngg_scratch =
					LLVMAddGlobalInAddressSpace(ctx.ac.module,
								    ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
				LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(ai32));
				LLVMSetAlignment(ctx.gs_ngg_scratch, 4);

				ctx.gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx.ac.module,
									      LLVMArrayType(ctx.ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
				LLVMSetLinkage(ctx.gs_ngg_emit, LLVMExternalLinkage);
				LLVMSetAlignment(ctx.gs_ngg_emit, 4);
			}

			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (shaders[i]->info.stage == MESA_SHADER_VERTEX &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    ctx.options->key.vs_common_out.export_prim_id) {
			declare_esgs_ring(&ctx);
		}

		bool nested_barrier = false;

		if (i) {
			if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			    ctx.options->key.vs_common_out.as_ngg) {
				gfx10_ngg_gs_emit_prologue(&ctx);
				nested_barrier = false;
			} else {
				nested_barrier = true;
			}
		}

		if (nested_barrier) {
			/* Execute a barrier before the second shader in
			 * a merged shader.
			 *
			 * Execute the barrier inside the conditional block,
			 * so that empty waves can jump directly to s_endpgm,
			 * which will also signal the barrier.
			 *
			 * This is possible in gfx9, because an empty wave
			 * for the second shader does not participate in
			 * the epilogue. With NGG, empty waves may still
			 * be required to export data (e.g. GS output vertices),
			 * so we cannot let them exit early.
			 *
			 * If the shader is TCS and the TCS epilog is present
			 * and contains a barrier, it will wait there and then
			 * reach s_endpgm.
			 */
			ac_emit_barrier(&ctx.ac, ctx.stage);
		}

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			prepare_interp_optimize(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post_2(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			   ctx.options->key.vs_common_out.as_ngg) {
			gfx10_ngg_gs_emit_epilogue_2(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir) {
		fprintf(stderr, "%s LLVM IR:\n\n",
			radv_get_shader_name(shader_info,
					     shaders[shader_count - 1]->info.stage));
		ac_dump_module(ctx.ac.module);
		fprintf(stderr, "\n");
	}

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
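/* Outline of the control flow built above for merged shaders (pseudocode,
 * not literal IR):
 *
 *   for each stage i:
 *       if (thread_id < byte i of merged_wave_info)   // then_block
 *           ... stage body ...
 *       // merge_block
 *
 * Waves with no work for a stage branch straight to the merge block, which
 * is why the NGG / GS epilogues are emitted outside the conditional: even
 * "empty" waves may still have to participate in exports.
 */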
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}
static unsigned radv_llvm_compile(LLVMModuleRef M,
				  char **pelf_buffer, size_t *pelf_size,
				  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;

	return retval;
}
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct radv_shader_binary **rbinary,
				   gl_shader_stage stage,
				   const char *name,
				   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;

	if (options->dump_shader) {
		fprintf(stderr, "%s LLVM IR:\n\n", name);
		ac_dump_module(llvm_module);
		fprintf(stderr, "\n");
	}

	if (options->record_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	free(llvm_ir_string);
	free(elf_buffer);
}
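/* Resulting layout of the binary built above:
 *
 *   [struct radv_shader_binary_rtld header][elf_size bytes of ELF]
 *   [llvm_ir_size + 1 bytes of IR text, present only when record_ir is set]
 *
 * total_size records the whole allocation, so the binary can be copied or
 * serialized as a single blob.
 */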
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct radv_shader_binary **rbinary,
			struct radv_shader_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary,
			       nir[nir_count - 1]->info.stage,
			       radv_get_shader_name(shader_info,
						    nir[nir_count - 1]->info.stage),
			       options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class >= GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (!ctx->options->use_ngg_streamout &&
	    ctx->shader_info->so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset = 0;

		if (stream > 0 && !num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		/* Fetch vertex data from GSVS ring */
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->shader->info.gs.vertices_out * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (!ctx->options->use_ngg_streamout &&
		    ctx->shader_info->so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, true,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
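/* Addressing used by the ring loads above: vtx_offset (vertex_id * 4)
 * selects the vertex, while soffset steps between stored components,
 * advancing by vertices_out * 16 * 4 bytes per component. For example, with
 * vertices_out == 4, component n of an output starts n * 256 bytes into the
 * stream's window; the per-thread swizzling itself is handled by the ring
 * descriptor set up in ac_setup_rings().
 */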
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct radv_shader_binary **rbinary,
			    struct radv_shader_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, AC_FLOAT_MODE_DEFAULT, 64, 64);
	ctx.context = ctx.ac.context;

	ctx.is_gs_copy_shader = true;
	ctx.stage = MESA_SHADER_VERTEX;
	ctx.shader = geom_shader;

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary,
			       MESA_SHADER_VERTEX, "GS Copy Shader", options);
	(*rbinary)->is_gs_copy_shader = true;
}