/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#if HAVE_LLVM >= 0x0700
#include <llvm-c/Transforms/Utils.h>
#endif

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring;
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex;
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
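/* Note: struct radv_shader_context embeds its ac_shader_abi as the "abi"
 * member, so the ABI callbacks below, which only receive the abi pointer,
 * can recover the enclosing context by subtracting the member offset,
 * which is exactly what container_of() does here. */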
struct ac_build_if_state {
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}

	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
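/* Worked example with illustrative numbers (not taken from a real pipeline):
 * with 3 input and 3 output control points, 8 vec4 inputs and outputs per
 * vertex and no per-patch outputs, input_patch_size and output_patch_size
 * are both 3 * 8 * 16 = 384 bytes. The one-wave-per-SIMD rule starts at
 * 64 / 3 * 4 = 84 patches, a 32 KiB LDS budget then allows
 * 32768 / (384 + 384) = 42, and the final clamp leaves 40 patches per
 * threadgroup. */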
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0		= get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0	= get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2		= get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2	= get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
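/* Example of the resulting offsets (illustrative numbers only): with
 * num_patches = 8, input_patch_size = 384 bytes and output_patch_size =
 * 448 bytes (384 bytes of per-vertex outputs plus 64 bytes of per-patch
 * outputs), patch N's inputs start at N * 384, the per-vertex output region
 * starts at 8 * 384 = 3072 bytes (get_tcs_out_patch0_offset returns this in
 * dwords, i.e. 768), patch N's per-vertex outputs start at 3072 + N * 448,
 * and its per-patch outputs follow 384 bytes later. */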
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}

static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}

static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};
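/* Usage sketch (hypothetical caller, for illustration only): the prologue
 * below collects arguments into an arg_info before the LLVM function exists,
 * e.g.
 *
 *	struct arg_info args = {};
 *	add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.view_index);
 *	add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
 *
 * and once create_llvm_function() has built the function, assign_arguments()
 * writes each LLVM parameter back through the recorded assign[] pointers. */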
enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};

static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}

static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
	uint32_t indirect_offset)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect_offset > 0;
	ud_info->indirect_offset = indirect_offset;
	*sgpr_idx += num_sgprs;
}
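/* Example of the bookkeeping (illustrative): with *sgpr_idx == 2,
 * set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &idx) records the push
 * constant pointer at user SGPR 2 and advances the index by 1 (32-bit
 * pointers) or 2 (64-bit pointers), so the next set_loc_*() call lands in
 * the SGPR immediately after it. */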
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];

	set_loc(ud_info, sgpr_idx, num_sgprs, 0);
}

static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
				  idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}

static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     uint32_t indirect_offset)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];

	set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect_offset);

	if (indirect_offset == 0)
		locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count += HAVE_32BIT_POINTERS ? 1 : 2;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
	}
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}

static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, 0);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));
			} else
				ctx->descriptor_sets[i] = NULL;
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);
		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);
			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
		ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
		ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1. With user SGPR0 = s8, lets restart the count from 0 */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
			      &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = ac_build_imad(&ctx->ac, index, stride,
			       LLVMConstInt(ctx->ac.i32, base_offset, false));

	desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
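/* Worked example (illustrative numbers only): with 3 vertices per patch,
 * num_patches = 8 and 16-byte (vec4) attributes, attribute A of patch P
 * vertex V lives at ((P * 3 + V) + A * 3 * 8) * 16 bytes into the per-vertex
 * region, and per-patch attribute A of patch P lives at
 * pervertex_output_patch_size * 8 + (P + A * 8) * 16 bytes, matching the
 * address computation in get_tcs_tes_buffer_address() below. */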
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}

static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
get_tcs_tes_buffer_address_params(struct radv_shader_context
*ctx
,
1316 unsigned const_index
,
1318 LLVMValueRef vertex_index
,
1319 LLVMValueRef indir_index
)
1321 LLVMValueRef param_index
;
1324 param_index
= LLVMBuildAdd(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, param
, false),
1327 if (const_index
&& !is_compact
)
1328 param
+= const_index
;
1329 param_index
= LLVMConstInt(ctx
->ac
.i32
, param
, false);
1331 return get_tcs_tes_buffer_address(ctx
, vertex_index
, param_index
);
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index, stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
*abi
,
1373 LLVMValueRef vertex_index
,
1374 LLVMValueRef indir_index
,
1375 unsigned const_index
,
1377 unsigned driver_location
,
1379 unsigned num_components
,
1384 struct radv_shader_context
*ctx
= radv_shader_context_from_abi(abi
);
1385 LLVMValueRef dw_addr
, stride
;
1386 LLVMValueRef value
[4], result
;
1387 unsigned param
= shader_io_get_unique_index(location
);
1390 uint32_t input_vertex_size
= (ctx
->tcs_num_inputs
* 16) / 4;
1391 stride
= LLVMConstInt(ctx
->ac
.i32
, input_vertex_size
, false);
1392 dw_addr
= get_tcs_in_current_patch_offset(ctx
);
1395 stride
= get_tcs_out_vertex_stride(ctx
);
1396 dw_addr
= get_tcs_out_current_patch_offset(ctx
);
1398 dw_addr
= get_tcs_out_current_patch_data_offset(ctx
);
1403 dw_addr
= get_dw_address(ctx
, dw_addr
, param
, const_index
, is_compact
, vertex_index
, stride
,
1406 for (unsigned i
= 0; i
< num_components
+ component
; i
++) {
1407 value
[i
] = ac_lds_load(&ctx
->ac
, dw_addr
);
1408 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
1411 result
= ac_build_varying_gather_values(&ctx
->ac
, value
, num_components
, component
);
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if (location == VARYING_SLOT_CLIP_DIST0 &&
	    is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	case 16:
		sample_pos_offset = 15;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
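/* The offsets above index the sample-position table in the ring: assuming the
 * table stores the 1x position first, then the 2x, 4x, 8x and 16x position
 * groups back to back, the group for N samples starts at element N - 1
 * (1, 3, 7, 15), which is what the switch returns. */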
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->info.ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	assert(stream == 0);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex,
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* pack clip and cull into a single set of slots */
			length = util_last_bit(output_usage_mask);
		}

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    1, 1, true, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");

	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
}
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}

static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}

static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}
static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}

static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi *abi,
	       LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 64;

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return alpha;
}

static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}

				if (ctx->options->key.vs.as_ls) {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");
		t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);

		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_buffer_load_format(&ctx->ac, t_list,
						    buffer_index,
						    ctx->ac.i32_0,
						    num_channels, false, true);

		input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}

static void interp_fs_input(struct radv_shader_context *ctx,
			    unsigned attr,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef result[4])
{
	LLVMValueRef attr_number;
	unsigned chan;
	LLVMValueRef i, j;
	bool interp = !LLVMIsUndef(interp_param);

	attr_number = LLVMConstInt(ctx->ac.i32, attr, false);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	if (interp) {
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						ctx->ac.v2f32, "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_1, "");
	}

	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);

		if (interp) {
			result[chan] = ac_build_fs_interp(&ctx->ac,
							  llvm_chan, attr_number,
							  prim_mask, i, j);
		} else {
			result[chan] = ac_build_fs_interp_mov(&ctx->ac,
							      LLVMConstInt(ctx->ac.i32, 2, false),
							      llvm_chan, attr_number,
							      prim_mask);
			result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
			result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], LLVMTypeOf(interp_param), "");
		}
	}
}

static void
handle_fs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	int idx = variable->data.location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	LLVMValueRef interp = NULL;
	uint64_t mask;

	variable->data.driver_location = idx * 4;
	mask = ((1ull << attrib_count) - 1) << variable->data.location;

	if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
		unsigned interp_type;
		if (variable->data.sample)
			interp_type = INTERP_SAMPLE;
		else if (variable->data.centroid)
			interp_type = INTERP_CENTROID;
		else
			interp_type = INTERP_CENTER;

		interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
	}
	bool is_16bit = glsl_type_is_16bit(variable->type);
	LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
	if (interp == NULL)
		interp = LLVMGetUndef(type);

	for (unsigned i = 0; i < attrib_count; ++i)
		ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;

	if (idx == VARYING_SLOT_CLIP_DIST0) {
		/* Do not account for the number of components inside the array
		 * of clip/cull distances because this might wrongly set other
		 * bits like primitive ID or layer.
		 */
		mask = 1ull << VARYING_SLOT_CLIP_DIST0;
	}

	ctx->input_mask |= mask;
}

static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}

static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}

static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index) {
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
		ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
	}

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
					inputs);

			if (LLVMIsUndef(interp_param))
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			++index;
		} else if (i == VARYING_SLOT_CLIP_DIST0) {
			int length = ctx->shader_info->info.ps.num_input_clips_culls;

			for (unsigned j = 0; j < length; j += 4) {
				inputs = ctx->inputs + ac_llvm_reg_index_soa(i, j);

				interp_param = *inputs;
				interp_fs_input(ctx, index, interp_param,
						ctx->abi.prim_mask, inputs);
				++index;
			}
		} else if (i == VARYING_SLOT_POS) {
			for(int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}

static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}

			mask_attribs = 1ull << idx;
		}
	}

	ctx->output_mask |= mask_attribs;
}

2261 /* Initialize arguments for the shader export intrinsic */
2263 si_llvm_init_export_args(struct radv_shader_context
*ctx
,
2264 LLVMValueRef
*values
,
2265 unsigned enabled_channels
,
2267 struct ac_export_args
*args
)
2269 /* Specify the channels that are enabled. */
2270 args
->enabled_channels
= enabled_channels
;
2272 /* Specify whether the EXEC mask represents the valid mask */
2273 args
->valid_mask
= 0;
2275 /* Specify whether this is the last export */
2278 /* Specify the target we are exporting */
2279 args
->target
= target
;
2281 args
->compr
= false;
2282 args
->out
[0] = LLVMGetUndef(ctx
->ac
.f32
);
2283 args
->out
[1] = LLVMGetUndef(ctx
->ac
.f32
);
2284 args
->out
[2] = LLVMGetUndef(ctx
->ac
.f32
);
2285 args
->out
[3] = LLVMGetUndef(ctx
->ac
.f32
);
2290 bool is_16bit
= ac_get_type_size(LLVMTypeOf(values
[0])) == 2;
2291 if (ctx
->stage
== MESA_SHADER_FRAGMENT
&& target
>= V_008DFC_SQ_EXP_MRT
) {
2292 unsigned index
= target
- V_008DFC_SQ_EXP_MRT
;
2293 unsigned col_format
= (ctx
->options
->key
.fs
.col_format
>> (4 * index
)) & 0xf;
2294 bool is_int8
= (ctx
->options
->key
.fs
.is_int8
>> index
) & 1;
2295 bool is_int10
= (ctx
->options
->key
.fs
.is_int10
>> index
) & 1;
2298 LLVMValueRef (*packf
)(struct ac_llvm_context
*ctx
, LLVMValueRef args
[2]) = NULL
;
2299 LLVMValueRef (*packi
)(struct ac_llvm_context
*ctx
, LLVMValueRef args
[2],
2300 unsigned bits
, bool hi
) = NULL
;
2302 switch(col_format
) {
2303 case V_028714_SPI_SHADER_ZERO
:
2304 args
->enabled_channels
= 0; /* writemask */
2305 args
->target
= V_008DFC_SQ_EXP_NULL
;
2308 case V_028714_SPI_SHADER_32_R
:
2309 args
->enabled_channels
= 1;
2310 args
->out
[0] = values
[0];
2313 case V_028714_SPI_SHADER_32_GR
:
2314 args
->enabled_channels
= 0x3;
2315 args
->out
[0] = values
[0];
2316 args
->out
[1] = values
[1];
2319 case V_028714_SPI_SHADER_32_AR
:
2320 args
->enabled_channels
= 0x9;
2321 args
->out
[0] = values
[0];
2322 args
->out
[3] = values
[3];
2325 case V_028714_SPI_SHADER_FP16_ABGR
:
2326 args
->enabled_channels
= 0x5;
2327 packf
= ac_build_cvt_pkrtz_f16
;
2329 for (unsigned chan
= 0; chan
< 4; chan
++)
2330 values
[chan
] = LLVMBuildFPExt(ctx
->ac
.builder
,
2336 case V_028714_SPI_SHADER_UNORM16_ABGR
:
2337 args
->enabled_channels
= 0x5;
2338 packf
= ac_build_cvt_pknorm_u16
;
2341 case V_028714_SPI_SHADER_SNORM16_ABGR
:
2342 args
->enabled_channels
= 0x5;
2343 packf
= ac_build_cvt_pknorm_i16
;
2346 case V_028714_SPI_SHADER_UINT16_ABGR
:
2347 args
->enabled_channels
= 0x5;
2348 packi
= ac_build_cvt_pk_u16
;
2350 for (unsigned chan
= 0; chan
< 4; chan
++)
2351 values
[chan
] = LLVMBuildZExt(ctx
->ac
.builder
,
2357 case V_028714_SPI_SHADER_SINT16_ABGR
:
2358 args
->enabled_channels
= 0x5;
2359 packi
= ac_build_cvt_pk_i16
;
2361 for (unsigned chan
= 0; chan
< 4; chan
++)
2362 values
[chan
] = LLVMBuildSExt(ctx
->ac
.builder
,
2369 case V_028714_SPI_SHADER_32_ABGR
:
2370 memcpy(&args
->out
[0], values
, sizeof(values
[0]) * 4);
2374 /* Pack f16 or norm_i16/u16. */
2376 for (chan
= 0; chan
< 2; chan
++) {
2377 LLVMValueRef pack_args
[2] = {
2379 values
[2 * chan
+ 1]
2381 LLVMValueRef packed
;
2383 packed
= packf(&ctx
->ac
, pack_args
);
2384 args
->out
[chan
] = ac_to_float(&ctx
->ac
, packed
);
2386 args
->compr
= 1; /* COMPR flag */
2391 for (chan
= 0; chan
< 2; chan
++) {
2392 LLVMValueRef pack_args
[2] = {
2393 ac_to_integer(&ctx
->ac
, values
[2 * chan
]),
2394 ac_to_integer(&ctx
->ac
, values
[2 * chan
+ 1])
2396 LLVMValueRef packed
;
2398 packed
= packi(&ctx
->ac
, pack_args
,
2399 is_int8
? 8 : is_int10
? 10 : 16,
2401 args
->out
[chan
] = ac_to_float(&ctx
->ac
, packed
);
2403 args
->compr
= 1; /* COMPR flag */
2409 for (unsigned chan
= 0; chan
< 4; chan
++) {
2410 values
[chan
] = LLVMBuildBitCast(ctx
->ac
.builder
, values
[chan
], ctx
->ac
.i16
, "");
2411 args
->out
[chan
] = LLVMBuildZExt(ctx
->ac
.builder
, values
[chan
], ctx
->ac
.i32
, "");
2414 memcpy(&args
->out
[0], values
, sizeof(values
[0]) * 4);
2416 for (unsigned i
= 0; i
< 4; ++i
) {
2417 if (!(args
->enabled_channels
& (1 << i
)))
2420 args
->out
[i
] = ac_to_float(&ctx
->ac
, args
->out
[i
]);
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}

2445 handle_vs_outputs_post(struct radv_shader_context
*ctx
,
2446 bool export_prim_id
, bool export_layer_id
,
2447 struct radv_vs_output_info
*outinfo
)
2449 uint32_t param_count
= 0;
2451 unsigned pos_idx
, num_pos_exports
= 0;
2452 struct ac_export_args args
, pos_args
[4] = {};
2453 LLVMValueRef psize_value
= NULL
, layer_value
= NULL
, viewport_index_value
= NULL
;
2456 if (ctx
->options
->key
.has_multiview_view_index
) {
2457 LLVMValueRef
* tmp_out
= &ctx
->abi
.outputs
[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)];
2459 for(unsigned i
= 0; i
< 4; ++i
)
2460 ctx
->abi
.outputs
[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER
, i
)] =
2461 ac_build_alloca_undef(&ctx
->ac
, ctx
->ac
.f32
, "");
2464 LLVMBuildStore(ctx
->ac
.builder
, ac_to_float(&ctx
->ac
, ctx
->abi
.view_index
), *tmp_out
);
2465 ctx
->output_mask
|= 1ull << VARYING_SLOT_LAYER
;
2468 memset(outinfo
->vs_output_param_offset
, AC_EXP_PARAM_UNDEFINED
,
2469 sizeof(outinfo
->vs_output_param_offset
));
2471 if (ctx
->output_mask
& (1ull << VARYING_SLOT_CLIP_DIST0
)) {
2472 unsigned output_usage_mask
, length
;
2473 LLVMValueRef slots
[8];
2476 if (ctx
->stage
== MESA_SHADER_VERTEX
&&
2477 !ctx
->is_gs_copy_shader
) {
2479 ctx
->shader_info
->info
.vs
.output_usage_mask
[VARYING_SLOT_CLIP_DIST0
];
2480 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
2482 ctx
->shader_info
->info
.tes
.output_usage_mask
[VARYING_SLOT_CLIP_DIST0
];
2484 assert(ctx
->is_gs_copy_shader
);
2486 ctx
->shader_info
->info
.gs
.output_usage_mask
[VARYING_SLOT_CLIP_DIST0
];
2489 length
= util_last_bit(output_usage_mask
);
2491 i
= VARYING_SLOT_CLIP_DIST0
;
2492 for (j
= 0; j
< length
; j
++)
2493 slots
[j
] = ac_to_float(&ctx
->ac
, radv_load_output(ctx
, i
, j
));
2495 for (i
= length
; i
< 8; i
++)
2496 slots
[i
] = LLVMGetUndef(ctx
->ac
.f32
);
2499 target
= V_008DFC_SQ_EXP_POS
+ 3;
2500 si_llvm_init_export_args(ctx
, &slots
[4], 0xf, target
, &args
);
2501 memcpy(&pos_args
[target
- V_008DFC_SQ_EXP_POS
],
2502 &args
, sizeof(args
));
2505 target
= V_008DFC_SQ_EXP_POS
+ 2;
2506 si_llvm_init_export_args(ctx
, &slots
[0], 0xf, target
, &args
);
2507 memcpy(&pos_args
[target
- V_008DFC_SQ_EXP_POS
],
2508 &args
, sizeof(args
));
2510 /* Export the clip/cull distances values to the next stage. */
2511 radv_export_param(ctx
, param_count
, &slots
[0], 0xf);
2512 outinfo
->vs_output_param_offset
[VARYING_SLOT_CLIP_DIST0
] = param_count
++;
2514 radv_export_param(ctx
, param_count
, &slots
[4], 0xf);
2515 outinfo
->vs_output_param_offset
[VARYING_SLOT_CLIP_DIST1
] = param_count
++;
2519 LLVMValueRef pos_values
[4] = {ctx
->ac
.f32_0
, ctx
->ac
.f32_0
, ctx
->ac
.f32_0
, ctx
->ac
.f32_1
};
2520 if (ctx
->output_mask
& (1ull << VARYING_SLOT_POS
)) {
2521 for (unsigned j
= 0; j
< 4; j
++)
2522 pos_values
[j
] = radv_load_output(ctx
, VARYING_SLOT_POS
, j
);
2524 si_llvm_init_export_args(ctx
, pos_values
, 0xf, V_008DFC_SQ_EXP_POS
, &pos_args
[0]);
2526 if (ctx
->output_mask
& (1ull << VARYING_SLOT_PSIZ
)) {
2527 outinfo
->writes_pointsize
= true;
2528 psize_value
= radv_load_output(ctx
, VARYING_SLOT_PSIZ
, 0);
2531 if (ctx
->output_mask
& (1ull << VARYING_SLOT_LAYER
)) {
2532 outinfo
->writes_layer
= true;
2533 layer_value
= radv_load_output(ctx
, VARYING_SLOT_LAYER
, 0);
2536 if (ctx
->output_mask
& (1ull << VARYING_SLOT_VIEWPORT
)) {
2537 outinfo
->writes_viewport_index
= true;
2538 viewport_index_value
= radv_load_output(ctx
, VARYING_SLOT_VIEWPORT
, 0);
2541 if (outinfo
->writes_pointsize
||
2542 outinfo
->writes_layer
||
2543 outinfo
->writes_viewport_index
) {
2544 pos_args
[1].enabled_channels
= ((outinfo
->writes_pointsize
== true ? 1 : 0) |
2545 (outinfo
->writes_layer
== true ? 4 : 0));
2546 pos_args
[1].valid_mask
= 0;
2547 pos_args
[1].done
= 0;
2548 pos_args
[1].target
= V_008DFC_SQ_EXP_POS
+ 1;
2549 pos_args
[1].compr
= 0;
2550 pos_args
[1].out
[0] = ctx
->ac
.f32_0
; /* X */
2551 pos_args
[1].out
[1] = ctx
->ac
.f32_0
; /* Y */
2552 pos_args
[1].out
[2] = ctx
->ac
.f32_0
; /* Z */
2553 pos_args
[1].out
[3] = ctx
->ac
.f32_0
; /* W */
2555 if (outinfo
->writes_pointsize
== true)
2556 pos_args
[1].out
[0] = psize_value
;
2557 if (outinfo
->writes_layer
== true)
2558 pos_args
[1].out
[2] = layer_value
;
2559 if (outinfo
->writes_viewport_index
== true) {
2560 if (ctx
->options
->chip_class
>= GFX9
) {
2561 /* GFX9 has the layer in out.z[10:0] and the viewport
2562 * index in out.z[19:16].
2564 LLVMValueRef v
= viewport_index_value
;
2565 v
= ac_to_integer(&ctx
->ac
, v
);
2566 v
= LLVMBuildShl(ctx
->ac
.builder
, v
,
2567 LLVMConstInt(ctx
->ac
.i32
, 16, false),
2569 v
= LLVMBuildOr(ctx
->ac
.builder
, v
,
2570 ac_to_integer(&ctx
->ac
, pos_args
[1].out
[2]), "");
2572 pos_args
[1].out
[2] = ac_to_float(&ctx
->ac
, v
);
2573 pos_args
[1].enabled_channels
|= 1 << 2;
2575 pos_args
[1].out
[3] = viewport_index_value
;
2576 pos_args
[1].enabled_channels
|= 1 << 3;
2580 for (i
= 0; i
< 4; i
++) {
2581 if (pos_args
[i
].out
[0])
2586 for (i
= 0; i
< 4; i
++) {
2587 if (!pos_args
[i
].out
[0])
2590 /* Specify the target we are exporting */
2591 pos_args
[i
].target
= V_008DFC_SQ_EXP_POS
+ pos_idx
++;
2592 if (pos_idx
== num_pos_exports
)
2593 pos_args
[i
].done
= 1;
2594 ac_build_export(&ctx
->ac
, &pos_args
[i
]);
2597 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
2598 LLVMValueRef values
[4];
2599 if (!(ctx
->output_mask
& (1ull << i
)))
2602 if (i
!= VARYING_SLOT_LAYER
&&
2603 i
!= VARYING_SLOT_PRIMITIVE_ID
&&
2604 i
< VARYING_SLOT_VAR0
)
2607 for (unsigned j
= 0; j
< 4; j
++)
2608 values
[j
] = ac_to_float(&ctx
->ac
, radv_load_output(ctx
, i
, j
));
2610 unsigned output_usage_mask
;
2612 if (ctx
->stage
== MESA_SHADER_VERTEX
&&
2613 !ctx
->is_gs_copy_shader
) {
2615 ctx
->shader_info
->info
.vs
.output_usage_mask
[i
];
2616 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
2618 ctx
->shader_info
->info
.tes
.output_usage_mask
[i
];
2620 assert(ctx
->is_gs_copy_shader
);
2622 ctx
->shader_info
->info
.gs
.output_usage_mask
[i
];
2625 radv_export_param(ctx
, param_count
, values
, output_usage_mask
);
2627 outinfo
->vs_output_param_offset
[i
] = param_count
++;
2630 if (export_prim_id
) {
2631 LLVMValueRef values
[4];
2633 values
[0] = ctx
->vs_prim_id
;
2634 ctx
->shader_info
->vs
.vgpr_comp_cnt
= MAX2(2,
2635 ctx
->shader_info
->vs
.vgpr_comp_cnt
);
2636 for (unsigned j
= 1; j
< 4; j
++)
2637 values
[j
] = ctx
->ac
.f32_0
;
2639 radv_export_param(ctx
, param_count
, values
, 0x1);
2641 outinfo
->vs_output_param_offset
[VARYING_SLOT_PRIMITIVE_ID
] = param_count
++;
2642 outinfo
->export_prim_id
= true;
2645 if (export_layer_id
&& layer_value
) {
2646 LLVMValueRef values
[4];
2648 values
[0] = layer_value
;
2649 for (unsigned j
= 1; j
< 4; j
++)
2650 values
[j
] = ctx
->ac
.f32_0
;
2652 radv_export_param(ctx
, param_count
, values
, 0x1);
2654 outinfo
->vs_output_param_offset
[VARYING_SLOT_LAYER
] = param_count
++;
2657 outinfo
->pos_exports
= num_pos_exports
;
2658 outinfo
->param_exports
= param_count
;
2662 handle_es_outputs_post(struct radv_shader_context
*ctx
,
2663 struct radv_es_output_info
*outinfo
)
2666 uint64_t max_output_written
= 0;
2667 LLVMValueRef lds_base
= NULL
;
2669 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
2670 unsigned output_usage_mask
;
2674 if (!(ctx
->output_mask
& (1ull << i
)))
2677 if (ctx
->stage
== MESA_SHADER_VERTEX
) {
2679 ctx
->shader_info
->info
.vs
.output_usage_mask
[i
];
2681 assert(ctx
->stage
== MESA_SHADER_TESS_EVAL
);
2683 ctx
->shader_info
->info
.tes
.output_usage_mask
[i
];
2686 if (i
== VARYING_SLOT_CLIP_DIST0
)
2687 length
= util_last_bit(output_usage_mask
);
2689 param_index
= shader_io_get_unique_index(i
);
2691 max_output_written
= MAX2(param_index
+ (length
> 4), max_output_written
);
2694 outinfo
->esgs_itemsize
= (max_output_written
+ 1) * 16;
2696 if (ctx
->ac
.chip_class
>= GFX9
) {
2697 unsigned itemsize_dw
= outinfo
->esgs_itemsize
/ 4;
2698 LLVMValueRef vertex_idx
= ac_get_thread_id(&ctx
->ac
);
2699 LLVMValueRef wave_idx
= ac_unpack_param(&ctx
->ac
, ctx
->merged_wave_info
, 24, 4);
2700 vertex_idx
= LLVMBuildOr(ctx
->ac
.builder
, vertex_idx
,
2701 LLVMBuildMul(ctx
->ac
.builder
, wave_idx
,
2702 LLVMConstInt(ctx
->ac
.i32
, 64, false), ""), "");
2703 lds_base
= LLVMBuildMul(ctx
->ac
.builder
, vertex_idx
,
2704 LLVMConstInt(ctx
->ac
.i32
, itemsize_dw
, 0), "");
2707 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
2708 LLVMValueRef dw_addr
= NULL
;
2709 LLVMValueRef
*out_ptr
= &ctx
->abi
.outputs
[i
* 4];
2710 unsigned output_usage_mask
;
2714 if (!(ctx
->output_mask
& (1ull << i
)))
2717 if (ctx
->stage
== MESA_SHADER_VERTEX
) {
2719 ctx
->shader_info
->info
.vs
.output_usage_mask
[i
];
2721 assert(ctx
->stage
== MESA_SHADER_TESS_EVAL
);
2723 ctx
->shader_info
->info
.tes
.output_usage_mask
[i
];
2726 if (i
== VARYING_SLOT_CLIP_DIST0
)
2727 length
= util_last_bit(output_usage_mask
);
2729 param_index
= shader_io_get_unique_index(i
);
2732 dw_addr
= LLVMBuildAdd(ctx
->ac
.builder
, lds_base
,
2733 LLVMConstInt(ctx
->ac
.i32
, param_index
* 4, false),
2737 for (j
= 0; j
< length
; j
++) {
2738 if (!(output_usage_mask
& (1 << j
)))
2741 LLVMValueRef out_val
= LLVMBuildLoad(ctx
->ac
.builder
, out_ptr
[j
], "");
2742 out_val
= ac_to_integer(&ctx
->ac
, out_val
);
2743 out_val
= LLVMBuildZExtOrBitCast(ctx
->ac
.builder
, out_val
, ctx
->ac
.i32
, "");
2745 if (ctx
->ac
.chip_class
>= GFX9
) {
2746 LLVMValueRef dw_addr_offset
=
2747 LLVMBuildAdd(ctx
->ac
.builder
, dw_addr
,
2748 LLVMConstInt(ctx
->ac
.i32
,
2751 ac_lds_store(&ctx
->ac
, dw_addr_offset
, out_val
);
2753 ac_build_buffer_store_dword(&ctx
->ac
,
2756 NULL
, ctx
->es2gs_offset
,
2757 (4 * param_index
+ j
) * 4,
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.vs.output_usage_mask[i];
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = util_last_bit(output_usage_mask);

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}

2800 write_tess_factors(struct radv_shader_context
*ctx
)
2802 unsigned stride
, outer_comps
, inner_comps
;
2803 struct ac_build_if_state if_ctx
, inner_if_ctx
;
2804 LLVMValueRef invocation_id
= ac_unpack_param(&ctx
->ac
, ctx
->abi
.tcs_rel_ids
, 8, 5);
2805 LLVMValueRef rel_patch_id
= ac_unpack_param(&ctx
->ac
, ctx
->abi
.tcs_rel_ids
, 0, 8);
2806 unsigned tess_inner_index
= 0, tess_outer_index
;
2807 LLVMValueRef lds_base
, lds_inner
= NULL
, lds_outer
, byteoffset
, buffer
;
2808 LLVMValueRef out
[6], vec0
, vec1
, tf_base
, inner
[4], outer
[4];
2810 ac_emit_barrier(&ctx
->ac
, ctx
->stage
);
2812 switch (ctx
->options
->key
.tcs
.primitive_mode
) {
2832 ac_nir_build_if(&if_ctx
, ctx
,
2833 LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
,
2834 invocation_id
, ctx
->ac
.i32_0
, ""));
2836 lds_base
= get_tcs_out_current_patch_data_offset(ctx
);
2839 tess_inner_index
= shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER
);
2840 lds_inner
= LLVMBuildAdd(ctx
->ac
.builder
, lds_base
,
2841 LLVMConstInt(ctx
->ac
.i32
, tess_inner_index
* 4, false), "");
2844 tess_outer_index
= shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER
);
2845 lds_outer
= LLVMBuildAdd(ctx
->ac
.builder
, lds_base
,
2846 LLVMConstInt(ctx
->ac
.i32
, tess_outer_index
* 4, false), "");
2848 for (i
= 0; i
< 4; i
++) {
2849 inner
[i
] = LLVMGetUndef(ctx
->ac
.i32
);
2850 outer
[i
] = LLVMGetUndef(ctx
->ac
.i32
);
2854 if (ctx
->options
->key
.tcs
.primitive_mode
== GL_ISOLINES
) {
2855 outer
[0] = out
[1] = ac_lds_load(&ctx
->ac
, lds_outer
);
2856 lds_outer
= LLVMBuildAdd(ctx
->ac
.builder
, lds_outer
,
2858 outer
[1] = out
[0] = ac_lds_load(&ctx
->ac
, lds_outer
);
2860 for (i
= 0; i
< outer_comps
; i
++) {
2862 ac_lds_load(&ctx
->ac
, lds_outer
);
2863 lds_outer
= LLVMBuildAdd(ctx
->ac
.builder
, lds_outer
,
2866 for (i
= 0; i
< inner_comps
; i
++) {
2867 inner
[i
] = out
[outer_comps
+i
] =
2868 ac_lds_load(&ctx
->ac
, lds_inner
);
2869 lds_inner
= LLVMBuildAdd(ctx
->ac
.builder
, lds_inner
,
2874 /* Convert the outputs to vectors for stores. */
2875 vec0
= ac_build_gather_values(&ctx
->ac
, out
, MIN2(stride
, 4));
2879 vec1
= ac_build_gather_values(&ctx
->ac
, out
+ 4, stride
- 4);
2882 buffer
= ctx
->hs_ring_tess_factor
;
2883 tf_base
= ctx
->tess_factor_offset
;
2884 byteoffset
= LLVMBuildMul(ctx
->ac
.builder
, rel_patch_id
,
2885 LLVMConstInt(ctx
->ac
.i32
, 4 * stride
, false), "");
2886 unsigned tf_offset
= 0;
2888 if (ctx
->options
->chip_class
<= VI
) {
2889 ac_nir_build_if(&inner_if_ctx
, ctx
,
2890 LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
,
2891 rel_patch_id
, ctx
->ac
.i32_0
, ""));
2893 /* Store the dynamic HS control word. */
2894 ac_build_buffer_store_dword(&ctx
->ac
, buffer
,
2895 LLVMConstInt(ctx
->ac
.i32
, 0x80000000, false),
2896 1, ctx
->ac
.i32_0
, tf_base
,
2897 0, 1, 0, true, false);
2900 ac_nir_build_endif(&inner_if_ctx
);
2903 /* Store the tessellation factors. */
2904 ac_build_buffer_store_dword(&ctx
->ac
, buffer
, vec0
,
2905 MIN2(stride
, 4), byteoffset
, tf_base
,
2906 tf_offset
, 1, 0, true, false);
2908 ac_build_buffer_store_dword(&ctx
->ac
, buffer
, vec1
,
2909 stride
- 4, byteoffset
, tf_base
,
2910 16 + tf_offset
, 1, 0, true, false);
2912 //store to offchip for TES to read - only if TES reads them
2913 if (ctx
->options
->key
.tcs
.tes_reads_tess_factors
) {
2914 LLVMValueRef inner_vec
, outer_vec
, tf_outer_offset
;
2915 LLVMValueRef tf_inner_offset
;
2916 unsigned param_outer
, param_inner
;
2918 param_outer
= shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER
);
2919 tf_outer_offset
= get_tcs_tes_buffer_address(ctx
, NULL
,
2920 LLVMConstInt(ctx
->ac
.i32
, param_outer
, 0));
2922 outer_vec
= ac_build_gather_values(&ctx
->ac
, outer
,
2923 util_next_power_of_two(outer_comps
));
2925 ac_build_buffer_store_dword(&ctx
->ac
, ctx
->hs_ring_tess_offchip
, outer_vec
,
2926 outer_comps
, tf_outer_offset
,
2927 ctx
->oc_lds
, 0, 1, 0, true, false);
2929 param_inner
= shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER
);
2930 tf_inner_offset
= get_tcs_tes_buffer_address(ctx
, NULL
,
2931 LLVMConstInt(ctx
->ac
.i32
, param_inner
, 0));
2933 inner_vec
= inner_comps
== 1 ? inner
[0] :
2934 ac_build_gather_values(&ctx
->ac
, inner
, inner_comps
);
2935 ac_build_buffer_store_dword(&ctx
->ac
, ctx
->hs_ring_tess_offchip
, inner_vec
,
2936 inner_comps
, tf_inner_offset
,
2937 ctx
->oc_lds
, 0, 1, 0, true, false);
2940 ac_nir_build_endif(&if_ctx
);
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}

static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}

static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}

2976 handle_fs_outputs_post(struct radv_shader_context
*ctx
)
2979 LLVMValueRef depth
= NULL
, stencil
= NULL
, samplemask
= NULL
;
2980 struct ac_export_args color_args
[8];
2982 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
2983 LLVMValueRef values
[4];
2985 if (!(ctx
->output_mask
& (1ull << i
)))
2988 if (i
< FRAG_RESULT_DATA0
)
2991 for (unsigned j
= 0; j
< 4; j
++)
2992 values
[j
] = ac_to_float(&ctx
->ac
,
2993 radv_load_output(ctx
, i
, j
));
2995 bool ret
= si_export_mrt_color(ctx
, values
,
2996 i
- FRAG_RESULT_DATA0
,
2997 &color_args
[index
]);
3002 /* Process depth, stencil, samplemask. */
3003 if (ctx
->shader_info
->info
.ps
.writes_z
) {
3004 depth
= ac_to_float(&ctx
->ac
,
3005 radv_load_output(ctx
, FRAG_RESULT_DEPTH
, 0));
3007 if (ctx
->shader_info
->info
.ps
.writes_stencil
) {
3008 stencil
= ac_to_float(&ctx
->ac
,
3009 radv_load_output(ctx
, FRAG_RESULT_STENCIL
, 0));
3011 if (ctx
->shader_info
->info
.ps
.writes_sample_mask
) {
3012 samplemask
= ac_to_float(&ctx
->ac
,
3013 radv_load_output(ctx
, FRAG_RESULT_SAMPLE_MASK
, 0));
3016 /* Set the DONE bit on last non-null color export only if Z isn't
3020 !ctx
->shader_info
->info
.ps
.writes_z
&&
3021 !ctx
->shader_info
->info
.ps
.writes_stencil
&&
3022 !ctx
->shader_info
->info
.ps
.writes_sample_mask
) {
3023 unsigned last
= index
- 1;
3025 color_args
[last
].valid_mask
= 1; /* whether the EXEC mask is valid */
3026 color_args
[last
].done
= 1; /* DONE bit */
3029 /* Export PS outputs. */
3030 for (unsigned i
= 0; i
< index
; i
++)
3031 ac_build_export(&ctx
->ac
, &color_args
[i
]);
3033 if (depth
|| stencil
|| samplemask
)
3034 radv_export_mrt_z(ctx
, depth
, stencil
, samplemask
);
3036 ac_build_export_null(&ctx
->ac
);
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}

static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
					       ctx->options->key.vs.export_layer_id,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
					       ctx->options->key.tes.export_layer_id,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}

static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}

static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls ||
		    ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}

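/* Load the ESGS/GSVS and tessellation ring descriptors from the ring_offsets
 * user SGPR for the stages that need them. */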
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= VI &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
	}
	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		LLVMValueRef tmp;
		uint32_t num_entries = 64;
		LLVMValueRef gsvs_ring_stride = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size, false);
		LLVMValueRef gsvs_ring_desc = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size << 16, false);
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));

		ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");

		tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
		if (ctx->options->chip_class >= VI)
			tmp = LLVMBuildMul(ctx->ac.builder, gsvs_ring_stride, tmp, "");
		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
		tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
		tmp = LLVMBuildOr(ctx->ac.builder, tmp, gsvs_ring_desc, "");
		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}

static unsigned
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
			      const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= CIK ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}

/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}

static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}

3214 LLVMModuleRef
ac_translate_nir_to_llvm(struct ac_llvm_compiler
*ac_llvm
,
3215 struct nir_shader
*const *shaders
,
3217 struct radv_shader_variant_info
*shader_info
,
3218 const struct radv_nir_compiler_options
*options
)
3220 struct radv_shader_context ctx
= {0};
3222 ctx
.options
= options
;
3223 ctx
.shader_info
= shader_info
;
3225 ac_llvm_context_init(&ctx
.ac
, options
->chip_class
, options
->family
);
3226 ctx
.context
= ctx
.ac
.context
;
3227 ctx
.ac
.module
= ac_create_module(ac_llvm
->tm
, ctx
.context
);
3229 enum ac_float_mode float_mode
=
3230 options
->unsafe_math
? AC_FLOAT_MODE_UNSAFE_FP_MATH
:
3231 AC_FLOAT_MODE_DEFAULT
;
3233 ctx
.ac
.builder
= ac_create_builder(ctx
.context
, float_mode
);
3235 memset(shader_info
, 0, sizeof(*shader_info
));
3237 for(int i
= 0; i
< shader_count
; ++i
)
3238 radv_nir_shader_info_pass(shaders
[i
], options
, &shader_info
->info
);
3240 for (i
= 0; i
< RADV_UD_MAX_SETS
; i
++)
3241 shader_info
->user_sgprs_locs
.descriptor_sets
[i
].sgpr_idx
= -1;
3242 for (i
= 0; i
< AC_UD_MAX_UD
; i
++)
3243 shader_info
->user_sgprs_locs
.shader_data
[i
].sgpr_idx
= -1;
3245 ctx
.max_workgroup_size
= 0;
3246 for (int i
= 0; i
< shader_count
; ++i
) {
3247 ctx
.max_workgroup_size
= MAX2(ctx
.max_workgroup_size
,
3248 ac_nir_get_max_workgroup_size(ctx
.options
->chip_class
,
3252 create_function(&ctx
, shaders
[shader_count
- 1]->info
.stage
, shader_count
>= 2,
3253 shader_count
>= 2 ? shaders
[shader_count
- 2]->info
.stage
: MESA_SHADER_VERTEX
);
3255 ctx
.abi
.inputs
= &ctx
.inputs
[0];
3256 ctx
.abi
.emit_outputs
= handle_shader_outputs_post
;
3257 ctx
.abi
.emit_vertex
= visit_emit_vertex
;
3258 ctx
.abi
.load_ubo
= radv_load_ubo
;
3259 ctx
.abi
.load_ssbo
= radv_load_ssbo
;
3260 ctx
.abi
.load_sampler_desc
= radv_get_sampler_desc
;
3261 ctx
.abi
.load_resource
= radv_load_resource
;
3262 ctx
.abi
.clamp_shadow_reference
= false;
3263 ctx
.abi
.gfx9_stride_size_workaround
= ctx
.ac
.chip_class
== GFX9
;
3265 if (shader_count
>= 2)
3266 ac_init_exec_full_mask(&ctx
.ac
);
3268 if (ctx
.ac
.chip_class
== GFX9
&&
3269 shaders
[shader_count
- 1]->info
.stage
== MESA_SHADER_TESS_CTRL
)
3270 ac_nir_fixup_ls_hs_input_vgprs(&ctx
);
3272 for(int i
= 0; i
< shader_count
; ++i
) {
3273 ctx
.stage
= shaders
[i
]->info
.stage
;
3274 ctx
.output_mask
= 0;
3276 if (shaders
[i
]->info
.stage
== MESA_SHADER_GEOMETRY
) {
3277 ctx
.gs_next_vertex
= ac_build_alloca(&ctx
.ac
, ctx
.ac
.i32
, "gs_next_vertex");
3278 ctx
.gs_max_out_vertices
= shaders
[i
]->info
.gs
.vertices_out
;
3279 ctx
.abi
.load_inputs
= load_gs_input
;
3280 ctx
.abi
.emit_primitive
= visit_end_primitive
;
3281 } else if (shaders
[i
]->info
.stage
== MESA_SHADER_TESS_CTRL
) {
3282 ctx
.tcs_outputs_read
= shaders
[i
]->info
.outputs_read
;
3283 ctx
.tcs_patch_outputs_read
= shaders
[i
]->info
.patch_outputs_read
;
3284 ctx
.abi
.load_tess_varyings
= load_tcs_varyings
;
3285 ctx
.abi
.load_patch_vertices_in
= load_patch_vertices_in
;
3286 ctx
.abi
.store_tcs_outputs
= store_tcs_output
;
3287 ctx
.tcs_vertices_per_patch
= shaders
[i
]->info
.tess
.tcs_vertices_out
;
3288 if (shader_count
== 1)
3289 ctx
.tcs_num_inputs
= ctx
.options
->key
.tcs
.num_inputs
;
3291 ctx
.tcs_num_inputs
= util_last_bit64(shader_info
->info
.vs
.ls_outputs_written
);
3292 ctx
.tcs_num_patches
= get_tcs_num_patches(&ctx
);
3293 } else if (shaders
[i
]->info
.stage
== MESA_SHADER_TESS_EVAL
) {
3294 ctx
.tes_primitive_mode
= shaders
[i
]->info
.tess
.primitive_mode
;
3295 ctx
.abi
.load_tess_varyings
= load_tes_input
;
3296 ctx
.abi
.load_tess_coord
= load_tess_coord
;
3297 ctx
.abi
.load_patch_vertices_in
= load_patch_vertices_in
;
3298 ctx
.tcs_vertices_per_patch
= shaders
[i
]->info
.tess
.tcs_vertices_out
;
3299 ctx
.tcs_num_patches
= ctx
.options
->key
.tes
.num_patches
;
3300 } else if (shaders
[i
]->info
.stage
== MESA_SHADER_VERTEX
) {
3301 if (shader_info
->info
.vs
.needs_instance_id
) {
3302 if (ctx
.options
->key
.vs
.as_ls
) {
3303 ctx
.shader_info
->vs
.vgpr_comp_cnt
=
3304 MAX2(2, ctx
.shader_info
->vs
.vgpr_comp_cnt
);
3306 ctx
.shader_info
->vs
.vgpr_comp_cnt
=
3307 MAX2(1, ctx
.shader_info
->vs
.vgpr_comp_cnt
);
3310 ctx
.abi
.load_base_vertex
= radv_load_base_vertex
;
3311 } else if (shaders
[i
]->info
.stage
== MESA_SHADER_FRAGMENT
) {
3312 shader_info
->fs
.can_discard
= shaders
[i
]->info
.fs
.uses_discard
;
3313 ctx
.abi
.lookup_interp_param
= lookup_interp_param
;
3314 ctx
.abi
.load_sample_position
= load_sample_position
;
3315 ctx
.abi
.load_sample_mask_in
= load_sample_mask_in
;
3316 ctx
.abi
.emit_kill
= radv_emit_kill
;
3320 ac_emit_barrier(&ctx
.ac
, ctx
.stage
);
3322 nir_foreach_variable(variable
, &shaders
[i
]->outputs
)
3323 scan_shader_output_decl(&ctx
, variable
, shaders
[i
], shaders
[i
]->info
.stage
);
3325 if (shaders
[i
]->info
.stage
== MESA_SHADER_GEOMETRY
) {
3326 unsigned addclip
= shaders
[i
]->info
.clip_distance_array_size
+
3327 shaders
[i
]->info
.cull_distance_array_size
> 4;
3328 ctx
.gsvs_vertex_size
= (util_bitcount64(ctx
.output_mask
) + addclip
) * 16;
3329 ctx
.max_gsvs_emit_size
= ctx
.gsvs_vertex_size
*
3330 shaders
[i
]->info
.gs
.vertices_out
;
3333 ac_setup_rings(&ctx
);
3335 LLVMBasicBlockRef merge_block
;
3336 if (shader_count
>= 2) {
3337 LLVMValueRef fn
= LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx
.ac
.builder
));
3338 LLVMBasicBlockRef then_block
= LLVMAppendBasicBlockInContext(ctx
.ac
.context
, fn
, "");
3339 merge_block
= LLVMAppendBasicBlockInContext(ctx
.ac
.context
, fn
, "");
3341 LLVMValueRef count
= ac_unpack_param(&ctx
.ac
, ctx
.merged_wave_info
, 8 * i
, 8);
3342 LLVMValueRef thread_id
= ac_get_thread_id(&ctx
.ac
);
3343 LLVMValueRef cond
= LLVMBuildICmp(ctx
.ac
.builder
, LLVMIntULT
,
3344 thread_id
, count
, "");
3345 LLVMBuildCondBr(ctx
.ac
.builder
, cond
, then_block
, merge_block
);
3347 LLVMPositionBuilderAtEnd(ctx
.ac
.builder
, then_block
);
3350 if (shaders
[i
]->info
.stage
== MESA_SHADER_FRAGMENT
)
3351 handle_fs_inputs(&ctx
, shaders
[i
]);
3352 else if(shaders
[i
]->info
.stage
== MESA_SHADER_VERTEX
)
3353 handle_vs_inputs(&ctx
, shaders
[i
]);
3354 else if(shader_count
>= 2 && shaders
[i
]->info
.stage
== MESA_SHADER_GEOMETRY
)
3355 prepare_gs_input_vgprs(&ctx
);
3357 ac_nir_translate(&ctx
.ac
, &ctx
.abi
, shaders
[i
]);
3359 if (shader_count
>= 2) {
3360 LLVMBuildBr(ctx
.ac
.builder
, merge_block
);
3361 LLVMPositionBuilderAtEnd(ctx
.ac
.builder
, merge_block
);
3364 if (shaders
[i
]->info
.stage
== MESA_SHADER_GEOMETRY
) {
3365 shader_info
->gs
.gsvs_vertex_size
= ctx
.gsvs_vertex_size
;
3366 shader_info
->gs
.max_gsvs_emit_size
= ctx
.max_gsvs_emit_size
;
3367 } else if (shaders
[i
]->info
.stage
== MESA_SHADER_TESS_CTRL
) {
3368 shader_info
->tcs
.num_patches
= ctx
.tcs_num_patches
;
3369 shader_info
->tcs
.lds_size
= calculate_tess_lds_size(&ctx
);
3373 LLVMBuildRetVoid(ctx
.ac
.builder
);
3375 if (options
->dump_preoptir
)
3376 ac_dump_module(ctx
.ac
.module
);
3378 ac_llvm_finalize_module(&ctx
, ac_llvm
->passmgr
, options
);
3380 if (shader_count
== 1)
3381 ac_nir_eliminate_const_vs_outputs(&ctx
);
3383 if (options
->dump_shader
) {
3384 ctx
.shader_info
->private_mem_vgprs
=
3385 ac_count_scratch_private_memory(ctx
.main_function
);
3388 return ctx
.ac
.module
;
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

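/* Run the LLVM backend on the module and read back the shader binary;
 * LLVM errors are reported through the diagnostic handler above. */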
static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Setup Diagnostic Handler */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR. */
	if (!radv_compile_to_binary(ac_llvm, M, binary))
		retval = 1;

	return retval;
}

3425 static void ac_compile_llvm_module(struct ac_llvm_compiler
*ac_llvm
,
3426 LLVMModuleRef llvm_module
,
3427 struct ac_shader_binary
*binary
,
3428 struct ac_shader_config
*config
,
3429 struct radv_shader_variant_info
*shader_info
,
3430 gl_shader_stage stage
,
3431 const struct radv_nir_compiler_options
*options
)
3433 if (options
->dump_shader
)
3434 ac_dump_module(llvm_module
);
3436 memset(binary
, 0, sizeof(*binary
));
3438 if (options
->record_llvm_ir
) {
3439 char *llvm_ir
= LLVMPrintModuleToString(llvm_module
);
3440 binary
->llvm_ir_string
= strdup(llvm_ir
);
3441 LLVMDisposeMessage(llvm_ir
);
3444 int v
= ac_llvm_compile(llvm_module
, binary
, ac_llvm
);
3446 fprintf(stderr
, "compile failed\n");
3449 if (options
->dump_shader
)
3450 fprintf(stderr
, "disasm:\n%s\n", binary
->disasm_string
);
3452 ac_shader_binary_read_config(binary
, config
, 0, options
->supports_spill
);
3454 LLVMContextRef ctx
= LLVMGetModuleContext(llvm_module
);
3455 LLVMDisposeModule(llvm_module
);
3456 LLVMContextDispose(ctx
);
3458 if (stage
== MESA_SHADER_FRAGMENT
) {
3459 shader_info
->num_input_vgprs
= 0;
3460 if (G_0286CC_PERSP_SAMPLE_ENA(config
->spi_ps_input_addr
))
3461 shader_info
->num_input_vgprs
+= 2;
3462 if (G_0286CC_PERSP_CENTER_ENA(config
->spi_ps_input_addr
))
3463 shader_info
->num_input_vgprs
+= 2;
3464 if (G_0286CC_PERSP_CENTROID_ENA(config
->spi_ps_input_addr
))
3465 shader_info
->num_input_vgprs
+= 2;
3466 if (G_0286CC_PERSP_PULL_MODEL_ENA(config
->spi_ps_input_addr
))
3467 shader_info
->num_input_vgprs
+= 3;
3468 if (G_0286CC_LINEAR_SAMPLE_ENA(config
->spi_ps_input_addr
))
3469 shader_info
->num_input_vgprs
+= 2;
3470 if (G_0286CC_LINEAR_CENTER_ENA(config
->spi_ps_input_addr
))
3471 shader_info
->num_input_vgprs
+= 2;
3472 if (G_0286CC_LINEAR_CENTROID_ENA(config
->spi_ps_input_addr
))
3473 shader_info
->num_input_vgprs
+= 2;
3474 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config
->spi_ps_input_addr
))
3475 shader_info
->num_input_vgprs
+= 1;
3476 if (G_0286CC_POS_X_FLOAT_ENA(config
->spi_ps_input_addr
))
3477 shader_info
->num_input_vgprs
+= 1;
3478 if (G_0286CC_POS_Y_FLOAT_ENA(config
->spi_ps_input_addr
))
3479 shader_info
->num_input_vgprs
+= 1;
3480 if (G_0286CC_POS_Z_FLOAT_ENA(config
->spi_ps_input_addr
))
3481 shader_info
->num_input_vgprs
+= 1;
3482 if (G_0286CC_POS_W_FLOAT_ENA(config
->spi_ps_input_addr
))
3483 shader_info
->num_input_vgprs
+= 1;
3484 if (G_0286CC_FRONT_FACE_ENA(config
->spi_ps_input_addr
))
3485 shader_info
->num_input_vgprs
+= 1;
3486 if (G_0286CC_ANCILLARY_ENA(config
->spi_ps_input_addr
))
3487 shader_info
->num_input_vgprs
+= 1;
3488 if (G_0286CC_SAMPLE_COVERAGE_ENA(config
->spi_ps_input_addr
))
3489 shader_info
->num_input_vgprs
+= 1;
3490 if (G_0286CC_POS_FIXED_PT_ENA(config
->spi_ps_input_addr
))
3491 shader_info
->num_input_vgprs
+= 1;
3493 config
->num_vgprs
= MAX2(config
->num_vgprs
, shader_info
->num_input_vgprs
);
3495 /* +3 for scratch wave offset and VCC */
3496 config
->num_sgprs
= MAX2(config
->num_sgprs
,
3497 shader_info
->num_input_sgprs
+ 3);
3499 /* Enable 64-bit and 16-bit denormals, because there is no performance
3502 * If denormals are enabled, all floating-point output modifiers are
3505 * Don't enable denormals for 32-bit floats, because:
3506 * - Floating-point output modifiers would be ignored by the hw.
3507 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3508 * have to stop using those.
3509 * - SI & CI would be very slow.
3511 config
->float_mode
|= V_00B028_FP_64_DENORMS
;
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}

void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}

3584 ac_gs_copy_shader_emit(struct radv_shader_context
*ctx
)
3586 LLVMValueRef vtx_offset
=
3587 LLVMBuildMul(ctx
->ac
.builder
, ctx
->abi
.vertex_id
,
3588 LLVMConstInt(ctx
->ac
.i32
, 4, false), "");
3589 unsigned offset
= 0;
3591 for (unsigned i
= 0; i
< AC_LLVM_MAX_OUTPUTS
; ++i
) {
3592 unsigned output_usage_mask
=
3593 ctx
->shader_info
->info
.gs
.output_usage_mask
[i
];
3596 if (!(ctx
->output_mask
& (1ull << i
)))
3599 if (i
== VARYING_SLOT_CLIP_DIST0
) {
3600 /* unpack clip and cull from a single set of slots */
3601 length
= util_last_bit(output_usage_mask
);
3604 for (unsigned j
= 0; j
< length
; j
++) {
3605 LLVMValueRef value
, soffset
;
3607 if (!(output_usage_mask
& (1 << j
)))
3610 soffset
= LLVMConstInt(ctx
->ac
.i32
,
3612 ctx
->gs_max_out_vertices
* 16 * 4, false);
3616 value
= ac_build_buffer_load(&ctx
->ac
, ctx
->gsvs_ring
,
3618 vtx_offset
, soffset
,
3619 0, 1, 1, true, false);
3621 LLVMTypeRef type
= LLVMGetAllocatedType(ctx
->abi
.outputs
[ac_llvm_reg_index_soa(i
, j
)]);
3622 if (ac_get_type_size(type
) == 2) {
3623 value
= LLVMBuildBitCast(ctx
->ac
.builder
, value
, ctx
->ac
.i32
, "");
3624 value
= LLVMBuildTrunc(ctx
->ac
.builder
, value
, ctx
->ac
.i16
, "");
3627 LLVMBuildStore(ctx
->ac
.builder
,
3628 ac_to_float(&ctx
->ac
, value
), ctx
->abi
.outputs
[ac_llvm_reg_index_soa(i
, j
)]);
3631 handle_vs_outputs_post(ctx
, false, false, &ctx
->shader_info
->vs
.outinfo
);
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}