/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_info *shader_info;
	const struct nir_shader *shader;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	/* On GFX10:
	 *  - bits 0..10: ordered_wave_id
	 *  - bits 12..20: number of vertices in group
	 *  - bits 22..30: number of primitives in group
	 */
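	/* Each of these fields can be pulled out with ac_unpack_param(),
	 * e.g. ac_unpack_param(&ctx->ac, ctx->gs_tg_info, 12, 9) for the
	 * vertex count; the unused bits 11/21/31 separate the fields. */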
	LLVMValueRef gs_tg_info;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef gs_curprim_verts[4];
	LLVMValueRef gs_generated_prims[4];
	LLVMValueRef gs_ngg_emit;
	LLVMValueRef gs_ngg_scratch;

	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;

	LLVMValueRef vertexptr; /* GFX10 only */
};
struct radv_shader_output_values {
	LLVMValueRef values[4];
	unsigned slot_name;
	unsigned slot_index;
	unsigned usage_mask;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
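/* These IDs mirror llvm::CallingConv::AMDGPU_{VS,GS,PS,CS,HS}, so they can
 * be handed directly to LLVMSetFunctionCallConv() below. */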
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = ctx->options->wave_size / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
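
/* Example: with triangle patches (3 input control points) and 4 output
 * control points this starts at 64 / 4 * 4 = 64 patches per threadgroup,
 * which is then clamped by the LDS size, by the offchip buffer size and
 * finally by the fixed cap of 40. */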
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0            = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0  = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2            = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2  = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
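/* Concretely, the TCS output region starts at
 * input_patch_size * num_patches (get_tcs_out_patch0_offset below), and the
 * per-patch data for patch 0 sits pervertex_output_patch_size bytes further
 * on (get_tcs_out_patch0_patch_data_offset). All of the helpers below
 * return offsets in dwords, hence the divisions by 4. */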
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
#define MAX_ARGS 64

struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->needs_multiview_view_index ||
		    (!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->max_push_constant_used -
		 ctx->shader_info->min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->loads_push_constants = false;
	}

	ctx->shader_info->base_inline_push_consts =
		ctx->shader_info->min_push_constant_used / 4;
}
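
/* Example: a shader whose only push constant access is a vec2 at byte
 * offset 8 has min_push_constant_used == 8 and max_push_constant_used == 16,
 * so (16 - 8) / 4 = 2 dwords get inlined into user SGPRs with
 * base_inline_push_consts == 8 / 4 = 2. */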
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->loads_push_constants)
		user_sgpr_count++;

	if (ctx->streamout_buffers)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
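
/* Example budget: a GFX9+ non-compute stage starts from 32 user SGPRs;
 * 2 go to scratch/rings, 1 to each directly-addressed descriptor set,
 * 1 to push constants and 1 to the view index when needed. Whatever is
 * left over is what allocate_inline_push_consts() may hand out, and if
 * the descriptor sets alone don't fit they all become indirect. */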
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->base_inline_push_consts;

	if (ctx->shader_info->so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs_common_out.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			if (ctx->ac.chip_class >= GFX10) {
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				if (ctx->options->key.vs_common_out.as_ngg) {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				} else {
					add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
					add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				}
			} else {
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
				add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
			}
		}
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	/* Streamout SGPRs. */
	if (ctx->shader_info->so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);

	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			previous_stage = stage;
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs_common_out.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.vs_common_out.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			if (ctx->options->key.vs_common_out.as_ngg) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs_tg_info);
			} else {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->gs2vs_offset);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->abi.linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (ctx->ac.chip_class >= GFX10) {
			desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				     S_008F0C_OOB_SELECT(3) |
				     S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
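
/* Note the dynamic-descriptor path above: dynamic buffer descriptors live
 * right after the push constants behind the same user SGPR pointer, with
 * descriptor i at byte offset push_constant_size + 16 * i, each one a
 * 16-byte buffer descriptor — which is why desc_ptr is rebased onto
 * ctx->abi.push_constants with a stride of 16 in that case. */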
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
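/* A per-vertex attribute is therefore addressed as
 *   (param_index * vertices_per_patch * num_patches +
 *    rel_patch_id * vertices_per_patch + vertex_index) * 16
 * bytes, which is what get_tcs_tes_buffer_address() below computes;
 * per-patch attributes drop the vertex term and add the size of the whole
 * per-vertex region (get_non_vertex_index_offset). */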
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->shader->info.patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->shader->info.outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), ac_glc, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), ac_glc, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
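
/* The sample positions ring packs the 1x, 2x, 4x and 8x position tables
 * back to back, so the table for N samples starts at entry N - 1
 * (0, 1, 3 and 7 respectively). */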
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs);
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
		return;
	}

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->shader->info.gs.vertices_out, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    ac_glc | ac_slc, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
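
/* The GSVS ring is written attribute-major: each enabled output component
 * gets a run of vertices_out slots, so the byte offset above works out to
 * (offset * vertices_out + gs_next_vertex) * 4 for the current vertex. */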
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->options->key.vs_common_out.as_ngg) {
		LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
		return;
	}

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}

static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}

static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi *abi,
	       LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 =
			radv_get_sampler_desc(abi, descriptor_set, base_index,
					      constant_index, index, AC_DESC_PLANE_1,
					      image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);

		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}

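/* Worked example of the sign-extension trick used below (illustrative
 * values): a 2-bit alpha of 0b10 sits in the low bits after the fetch;
 * shifting left by 30 and arithmetic-shifting right by 30 yields -2 as a
 * 32-bit signed integer, i.e. a natural sign extension with no
 * compare/select needed.
 */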
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}

static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		break;
	}

	return 4;
}

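/* Pads a fetched vertex attribute out to a full vec4: missing components
 * become 0 and a missing .w becomes 1 (in the float or integer flavour
 * selected by the attribute's number format), i.e. the usual (0, 0, 0, 1)
 * defaults.
 */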
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}

static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index,
						    buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format, 0, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}

static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}

static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->abi.persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->abi.persp_center, ctx->abi.persp_centroid, "");
		ctx->abi.linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->abi.linear_center, ctx->abi.linear_centroid, "");
	}
}

static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;

	ctx->output_mask |= mask_attribs;
}

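/* For fragment-shader MRT exports below, the color format selects between
 * plain 32-bit exports and compressed 16-bit pairs: when a packf/packi
 * helper is chosen, out[0] and out[1] each carry two packed 16-bit values,
 * the COMPR flag is set, and enabled_channels becomes 0x5 to match the
 * compressed encoding.
 */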
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			if (ctx->ac.chip_class >= GFX10) {
				args->enabled_channels = 0x3;
				args->out[0] = values[0];
				args->out[1] = values[3];
			} else {
				args->enabled_channels = 0x9;
				args->out[0] = values[0];
				args->out[3] = values[3];
			}
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16 or u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
		return;
	}

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}

static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output = ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
	return LLVMBuildLoad(ctx->ac.builder, output, "");
}

static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output,
			struct radv_shader_output_values *shader_out)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac, shader_out->values[start + i]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    ac_glc | ac_slc, false);
}

static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);

	/* The buffer offset is computed as follows:
	 *   ByteOffset = streamout_offset[buffer_id]*4 +
	 *                (streamout_write_index + thread_id)*stride[buffer_id] +
	 *                attrib_offset
	 */
	LLVMValueRef so_write_index = ctx->streamout_write_idx;

	/* Compute (streamout_write_index + thread_id). */
	so_write_index =
		LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

	/* Load the descriptor and compute the write offset for each
	 * enabled buffer.
	 */
	LLVMValueRef so_write_offset[4] = {};
	LLVMValueRef so_buffers[4] = {};
	LLVMValueRef buf_ptr = ctx->streamout_buffers;

	for (i = 0; i < 4; i++) {
		uint16_t stride = ctx->shader_info->so.strides[i];

		if (!stride)
			continue;

		LLVMValueRef offset =
			LLVMConstInt(ctx->ac.i32, i, false);

		so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
						      buf_ptr, offset);

		LLVMValueRef so_offset = ctx->streamout_offset[i];

		so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
					 LLVMConstInt(ctx->ac.i32, 4, false), "");

		so_write_offset[i] =
			ac_build_imad(&ctx->ac, so_write_index,
				      LLVMConstInt(ctx->ac.i32,
						   stride * 4, false),
				      so_offset);
	}

	/* Write streamout data. */
	for (i = 0; i < ctx->shader_info->so.num_outputs; i++) {
		struct radv_shader_output_values shader_out = {};
		struct radv_stream_output *output =
			&ctx->shader_info->so.outputs[i];

		if (stream != output->stream)
			continue;

		for (int j = 0; j < 4; j++) {
			shader_out.values[j] =
				radv_load_output(ctx, output->location, j);
		}

		radv_emit_stream_output(ctx, so_buffers, so_write_offset,
					output, &shader_out);
	}

	ac_build_endif(&ctx->ac, 6501);
}

static void
radv_build_param_exports(struct radv_shader_context *ctx,
			 struct radv_shader_output_values *outputs,
			 unsigned noutput,
			 struct radv_vs_output_info *outinfo,
			 bool export_clip_dists)
{
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned slot_name = outputs[i].slot_name;
		unsigned usage_mask = outputs[i].usage_mask;

		if (slot_name != VARYING_SLOT_LAYER &&
		    slot_name != VARYING_SLOT_PRIMITIVE_ID &&
		    slot_name != VARYING_SLOT_CLIP_DIST0 &&
		    slot_name != VARYING_SLOT_CLIP_DIST1 &&
		    slot_name < VARYING_SLOT_VAR0)
			continue;

		if ((slot_name == VARYING_SLOT_CLIP_DIST0 ||
		     slot_name == VARYING_SLOT_CLIP_DIST1) && !export_clip_dists)
			continue;

		radv_export_param(ctx, param_count, outputs[i].values, usage_mask);

		assert(i < ARRAY_SIZE(outinfo->vs_output_param_offset));
		outinfo->vs_output_param_offset[slot_name] = param_count++;
	}

	outinfo->param_exports = param_count;
}

/* Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 */
static void
radv_llvm_export_vs(struct radv_shader_context *ctx,
		    struct radv_shader_output_values *outputs,
		    unsigned noutput,
		    struct radv_vs_output_info *outinfo,
		    bool export_clip_dists)
{
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
	struct ac_export_args pos_args[4] = {};
	unsigned pos_idx, index;
	int i;

	/* Build position exports */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].slot_name) {
		case VARYING_SLOT_POS:
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case VARYING_SLOT_PSIZ:
			psize_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_VIEWPORT:
			viewport_value = outputs[i].values[0];
			break;
		case VARYING_SLOT_CLIP_DIST0:
		case VARYING_SLOT_CLIP_DIST1:
			index = 2 + outputs[i].slot_index;
			si_llvm_init_export_args(ctx, outputs[i].values, 0xf,
						 V_008DFC_SQ_EXP_POS + index,
						 &pos_args[index]);
			break;
		default:
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			outinfo->pos_exports++;
	}

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->ac.family == CHIP_NAVI10 ||
	    ctx->ac.family == CHIP_NAVI12 ||
	    ctx->ac.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == outinfo->pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports */
	radv_build_param_exports(ctx, outputs, noutput, outinfo, export_clip_dists);
}

static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id,
		       bool export_clip_dists,
		       struct radv_vs_output_info *outinfo)
{
	struct radv_shader_output_values *outputs;
	unsigned noutput = 0;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef *tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for (unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));
	outinfo->pos_exports = 0;

	if (ctx->shader_info->so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	/* Allocate a temporary array for the output values. */
	unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_prim_id;
	outputs = malloc(num_outputs * sizeof(outputs[0]));

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		outputs[noutput].slot_name = i;
		outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			outputs[noutput].usage_mask =
				ctx->shader_info->vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			outputs[noutput].usage_mask =
				ctx->shader_info->tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			outputs[noutput].usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
		}

		for (unsigned j = 0; j < 4; j++) {
			outputs[noutput].values[j] =
				ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
		}

		noutput++;
	}

	/* Export PrimitiveID. */
	if (export_prim_id) {
		outinfo->export_prim_id = true;

		outputs[noutput].slot_name = VARYING_SLOT_PRIMITIVE_ID;
		outputs[noutput].slot_index = 0;
		outputs[noutput].usage_mask = 0x1;
		outputs[noutput].values[0] = ctx->vs_prim_id;
		for (unsigned j = 1; j < 4; j++)
			outputs[noutput].values[j] = ctx->ac.f32_0;
		noutput++;
	}

	radv_llvm_export_vs(ctx, outputs, noutput, outinfo, export_clip_dists);

	free(outputs);
}

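/* ES outputs are laid out one vec4 (16 bytes) per unique slot, hence
 * esgs_itemsize = (max_slot + 1) * 16. On GFX9+ the merged ES/GS stage
 * passes them through LDS; on older chips they go through the ESGS ring
 * buffer instead.
 */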
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index, max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32,
								   ctx->ac.wave_size, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    ac_glc | ac_slc, true);
			}
		}
	}
}

static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}

static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct radv_shader_context *ctx)
{
	return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

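/* The NGG GS vertex storage in LDS is viewed as an unbounded array of
 * { [4 * num_outputs x i32], [4 x i8] }: the i32 array holds one emitted
 * vertex's output components and the four i8s hold the per-stream
 * primitive flags for that vertex.
 */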
static LLVMValueRef
ngg_gs_get_vertex_storage(struct radv_shader_context *ctx)
{
	unsigned num_outputs = util_bitcount64(ctx->output_mask);

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(ctx->shader->info.gs.vertices_out) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

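/* build_export_prim() encodes the fields above into the single-dword NGG
 * primitive export: index i occupies bits [10*i + 8 : 10*i] with its edge
 * flag at bit 10*i + 9, and bit 31 marks a null (skipped) primitive.
 */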
static void build_export_prim(struct radv_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}

static void
handle_ngg_outputs_post(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned num_vertices = 3;
	LLVMValueRef tmp;

	assert((ctx->stage == MESA_SHADER_VERTEX ||
	        ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);

	LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
		ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
	};

	/* TODO: streamout */

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->stage == MESA_SHADER_VERTEX &&
	    ctx->options->key.vs_common_out.export_prim_id) {
		/* TODO: streamout */

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			LLVMConstInt(ctx->ac.i32, 0, false);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ctx->abi.gs_prim_id,
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* TODO: streamout queries */

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		struct radv_vs_output_info *outinfo =
			ctx->stage == MESA_SHADER_TESS_EVAL ? &ctx->shader_info->tes.outinfo : &ctx->shader_info->vs.outinfo;

		/* Exporting the primitive ID is handled below. */
		/* TODO: use the new VS export path */
		handle_vs_outputs_post(ctx, false,
				       ctx->options->key.vs_common_out.export_clip_dists,
				       outinfo);

		if (ctx->options->key.vs_common_out.export_prim_id) {
			unsigned param_count = outinfo->param_exports;
			LLVMValueRef values[4];

			if (ctx->stage == MESA_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->stage == MESA_SHADER_TESS_EVAL);
				values[0] = ctx->abi.tes_patch_id;
			}

			values[0] = ac_to_float(&ctx->ac, values[0]);
			for (unsigned j = 1; j < 4; j++)
				values[j] = ctx->ac.f32_0;

			radv_export_param(ctx, param_count, values, 0x1);

			outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
			outinfo->export_prim_id = true;
			outinfo->param_exports = param_count;
		}
	}
	ac_build_endif(&ctx->ac, 6002);
}

static void gfx10_ngg_gs_emit_prologue(struct radv_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMBasicBlockRef merge_block;
	LLVMValueRef cond;

	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
	LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
	merge_block = LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");

	cond = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMBuildCondBr(ctx->ac.builder, cond, then_block, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, then_block);

	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
	LLVMBuildStore(builder, ctx->ac.i32_0, ptr);

	LLVMBuildBr(ctx->ac.builder, merge_block);
	LLVMPositionBuilderAtEnd(ctx->ac.builder, merge_block);

	ac_build_s_barrier(&ctx->ac);
}

static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		unsigned num_components;

		num_components =
			ctx->shader_info->gs.num_stream_output_components[stream];
		if (!num_components)
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}
}

static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
{
	const unsigned verts_per_prim = si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* TODO: streamout */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
						     LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct radv_vs_output_info *outinfo = &ctx->shader_info->vs.outinfo;
		bool export_view_index = ctx->options->key.has_multiview_view_index;
		struct radv_shader_output_values *outputs;
		unsigned noutput = 0;

		/* Allocate a temporary array for the output values. */
		unsigned num_outputs = util_bitcount64(ctx->output_mask) + export_view_index;
		outputs = calloc(num_outputs, sizeof(outputs[0]));

		memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
		       sizeof(outinfo->vs_output_param_offset));
		outinfo->pos_exports = 0;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			if (!(ctx->output_mask & (1ull << i)))
				continue;

			outputs[noutput].slot_name = i;
			outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;

			outputs[noutput].usage_mask = ctx->shader_info->gs.output_usage_mask[i];
			int length = util_last_bit(outputs[noutput].usage_mask);

			for (unsigned j = 0; j < length; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					tmp = ac_to_integer(&ctx->ac, tmp);
					tmp = LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i16, "");
				}

				outputs[noutput].values[j] = ac_to_float(&ctx->ac, tmp);
			}

			for (unsigned j = length; j < 4; j++)
				outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);

			noutput++;
		}

		/* Export ViewIndex. */
		if (export_view_index) {
			outputs[noutput].slot_name = VARYING_SLOT_LAYER;
			outputs[noutput].slot_index = 0;
			outputs[noutput].usage_mask = 0x1;
			outputs[noutput].values[0] = ac_to_float(&ctx->ac, ctx->abi.view_index);
			for (unsigned j = 1; j < 4; j++)
				outputs[noutput].values[j] = ctx->ac.f32_0;
			noutput++;
		}

		radv_llvm_export_vs(ctx, outputs, noutput, outinfo,
				    ctx->options->key.vs_common_out.export_clip_dists);
		free(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

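/* NGG GS emit path: instead of writing to the GSVS ring, each emitted
 * vertex is stored to its LDS slot (the first struct member, indexed by a
 * running out_idx across all enabled components), and the per-stream
 * primflag byte records whether this vertex completed a primitive; the
 * epilogue above later turns those flags into compacted exports.
 */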
static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++, out_idx++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 <= ctx->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
}

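/* Write the patch's tessellation factors once per patch: TCS invocation 0
 * reads the inner/outer levels back from LDS and stores them to the tess
 * factor ring (preceded by the dynamic HS control word on GFX8 and
 * older), and also to the off-chip ring when TES reads the factors.
 */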
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_build_ifcc(&ctx->ac,
		      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				    invocation_id, ctx->ac.i32_0, ""), 6503);

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps + i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_build_ifcc(&ctx->ac,
			      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					    rel_patch_id, ctx->ac.i32_0, ""), 6504);

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, ac_glc, false);
		tf_offset += 4;

		ac_build_endif(&ctx->ac, 6504);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, ac_glc, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, ac_glc, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, ac_glc, false);

		param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_inner, 0));

		inner_vec = inner_comps == 1 ? inner[0] :
			ac_build_gather_values(&ctx->ac, inner, inner_comps);
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
					    inner_comps, tf_inner_offset,
					    ctx->oc_lds, 0, ac_glc, false);
	}

	ac_build_endif(&ctx->ac, 6503);
}

static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}

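/* Gather and export all fragment shader results: color MRT exports first,
 * then depth/stencil/sample mask. The DONE bit goes on the last color
 * export only when no Z export follows, and a null export is emitted when
 * nothing else was exported, since the hardware expects at least one
 * export per fragment shader.
 */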
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->ps.writes_z &&
	    !ctx->shader_info->ps.writes_stencil &&
	    !ctx->shader_info->ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}

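/* Finish the geometry shader. With NGG the epilogue is emitted inline;
 * on the legacy path, make prior stores visible (release fence on GFX10)
 * and send the GS_DONE message to the fixed-function hardware.
 */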
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	if (ctx->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_epilogue_1(ctx);
		return;
	}

	if (ctx->ac.chip_class >= GFX10)
		LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

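/* ABI callback run at the end of each shader body; dispatches to the
 * per-stage output epilogue. NGG VS/TES outputs are deliberately skipped
 * here because they are handled outside the shader body.
 */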
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else if (ctx->options->key.vs_common_out.as_ngg)
			break; /* handled outside of the shader body */
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
					       ctx->options->key.vs_common_out.export_clip_dists,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}

static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}

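/* Ask ac_optimize_vs_outputs to remove constant and unused parameter
 * exports. Only meaningful for a VS or TES that feeds the rasterizer
 * directly; LS/ES variants and other stages bail out early.
 */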
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs_common_out.as_ls ||
		    ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs_common_out.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}

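/* Load the ring descriptors this stage needs from the ring_offsets table:
 * ESGS on GFX8 and older, GSVS for the GS stage and the copy shader, and
 * the tess factor / off-chip rings for tessellation. The GS's GSVS
 * descriptors are patched so base, stride and record count match the
 * swizzled per-stream layout described below.
 */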
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs_common_out.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLc0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = ctx->ac.wave_size;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->shader->info.gs.vertices_out;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * ctx->ac.wave_size;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}

unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				gl_shader_stage stage,
				const struct nir_shader *nir)
{
	const unsigned backup_sizes[] = {chip_class >= GFX9 ? 128 : 64, 1, 1};
	return radv_get_max_workgroup_size(chip_class, stage, nir ? nir->info.cs.local_size : backup_sizes);
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}

static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for (int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}

/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
static void declare_esgs_ring(struct radv_shader_context *ctx)
{
	if (ctx->esgs_ring)
		return;

	assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

	ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
		ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
		"esgs_ring",
		AC_ADDR_SPACE_LDS);
	LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
	LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}

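/* Translate one or more NIR shaders (more than one when stages are merged
 * on GFX9+) into a single LLVM module: gather shader info, set up the ABI
 * callbacks, create the wrapper function, emit each stage's body guarded
 * by its wave count from merged_wave_info, then run the epilogues and the
 * LLVM pass manager.
 */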
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
				       struct nir_shader *const *shaders,
				       int shader_count,
				       struct radv_shader_info *shader_info,
				       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;
	ctx.options = options;
	ctx.shader_info = shader_info;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, float_mode, options->wave_size, 64);
	ctx.context = ctx.ac.context;

	radv_nir_shader_info_init(shader_info);

	for (int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, shader_info);

	for (i = 0; i < MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
					      radv_nir_get_max_workgroup_size(ctx.options->chip_class,
									      shaders[i]->info.stage,
									      shaders[i]));
	}

	if (ctx.ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(shaders[0]->info.stage) &&
		    options->key.vs_common_out.as_ngg) {
			ctx.max_workgroup_size = 128;
		}
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
			shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.robust_buffer_access = options->robust_buffer_access;

	bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
	if (shader_count >= 2 || is_ngg)
		ac_init_exec_full_mask(&ctx.ac);

	if (options->has_ls_vgpr_init_bug &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for (int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.shader = shaders[i];
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int i = 0; i < 4; i++) {
				ctx.gs_next_vertex[i] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			if (ctx.options->key.vs_common_out.as_ngg) {
				for (unsigned i = 0; i < 4; ++i) {
					ctx.gs_curprim_verts[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
					ctx.gs_generated_prims[i] =
						ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
				}

				/* TODO: streamout */

				LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, 8);
				ctx.gs_ngg_scratch =
					LLVMAddGlobalInAddressSpace(ctx.ac.module,
								    ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
				LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(ai32));
				LLVMSetAlignment(ctx.gs_ngg_scratch, 4);

				ctx.gs_ngg_emit = LLVMBuildIntToPtr(ctx.ac.builder, ctx.ac.i32_0,
								    LLVMPointerType(LLVMArrayType(ctx.ac.i32, 0), AC_ADDR_SPACE_LDS),
								    "ngg_emit");
			}

			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (shaders[i]->info.stage == MESA_SHADER_VERTEX &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    ctx.options->key.vs_common_out.export_prim_id) {
			declare_esgs_ring(&ctx);
		}

		bool nested_barrier = false;

		if (i) {
			if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			    ctx.options->key.vs_common_out.as_ngg) {
				gfx10_ngg_gs_emit_prologue(&ctx);
				nested_barrier = false;
			} else {
				nested_barrier = true;
			}
		}

		if (nested_barrier) {
			/* Execute a barrier before the second shader in
			 * a merged shader.
			 *
			 * Execute the barrier inside the conditional block,
			 * so that empty waves can jump directly to s_endpgm,
			 * which will also signal the barrier.
			 *
			 * This is possible in gfx9, because an empty wave
			 * for the second shader does not participate in
			 * the epilogue. With NGG, empty waves may still
			 * be required to export data (e.g. GS output vertices),
			 * so we cannot let them exit early.
			 *
			 * If the shader is TCS and the TCS epilog is present
			 * and contains a barrier, it will wait there and then
			 * reach s_endpgm.
			 */
			ac_emit_barrier(&ctx.ac, ctx.stage);
		}

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					   shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
						 shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2 || is_ngg) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			prepare_interp_optimize(&ctx, shaders[i]);
		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2 || is_ngg) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		/* This needs to be outside the if wrapping the shader body, as sometimes
		 * the HW generates waves with 0 es/vs threads. */
		if (is_pre_gs_stage(shaders[i]->info.stage) &&
		    ctx.options->key.vs_common_out.as_ngg &&
		    i == shader_count - 1) {
			handle_ngg_outputs_post(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
			   ctx.options->key.vs_common_out.as_ngg) {
			gfx10_ngg_gs_emit_epilogue_2(&ctx);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir) {
		fprintf(stderr, "%s LLVM IR:\n\n",
			radv_get_shader_name(shader_info,
					     shaders[shader_count - 1]->info.stage));
		ac_dump_module(ctx.ac.module);
		fprintf(stderr, "\n");
	}

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}

static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

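/* Compile the module to an ELF binary. A non-zero return indicates
 * failure, reported either through the diagnostic handler above or by
 * radv_compile_to_elf itself.
 */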
static unsigned radv_llvm_compile(LLVMModuleRef M,
				  char **pelf_buffer, size_t *pelf_size,
				  struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR */
	if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
		retval = 1;
	return retval;
}

static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct radv_shader_binary **rbinary,
				   gl_shader_stage stage,
				   const char *name,
				   const struct radv_nir_compiler_options *options)
{
	char *elf_buffer = NULL;
	size_t elf_size = 0;
	char *llvm_ir_string = NULL;

	if (options->dump_shader) {
		fprintf(stderr, "%s LLVM IR:\n\n", name);
		ac_dump_module(llvm_module);
		fprintf(stderr, "\n");
	}

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
	size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
	struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
	memcpy(rbin->data, elf_buffer, elf_size);
	if (llvm_ir_string)
		memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);

	rbin->base.type = RADV_BINARY_TYPE_RTLD;
	rbin->base.stage = stage;
	rbin->base.total_size = alloc_size;
	rbin->elf_size = elf_size;
	rbin->llvm_ir_size = llvm_ir_size;
	*rbinary = &rbin->base;

	free(llvm_ir_string);
}

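/* Entry point for compiling a (possibly merged) NIR shader chain into a
 * radv_shader_binary, also recording the ES stage type for GFX9 merged GS
 * and the wave size used.
 */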
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct radv_shader_binary **rbinary,
			struct radv_shader_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, rbinary,
			       nir[nir_count - 1]->info.stage,
			       radv_get_shader_name(shader_info,
						    nir[nir_count - 1]->info.stage),
			       options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class >= GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
	shader_info->wave_size = options->wave_size;
}

static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);

	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->shader->info.gs.vertices_out * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc | ac_slc, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, true,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}

void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct radv_shader_binary **rbinary,
			    struct radv_shader_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
			     options->family, float_mode, 64, 64);
	ctx.context = ctx.ac.context;

	ctx.is_gs_copy_shader = true;
	ctx.stage = MESA_SHADER_VERTEX;
	ctx.shader = geom_shader;

	radv_nir_shader_info_pass(geom_shader, options, shader_info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary,
			       MESA_SHADER_VERTEX, "GS Copy Shader", options);
	(*rbinary)->is_gs_copy_shader = true;
}