/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "radv_private.h"
29 #include "radv_shader.h"
30 #include "radv_shader_helper.h"
33 #include <llvm-c/Core.h>
34 #include <llvm-c/TargetMachine.h>
35 #include <llvm-c/Transforms/Scalar.h>
36 #include <llvm-c/Transforms/Utils.h>
40 #include "ac_binary.h"
41 #include "ac_llvm_util.h"
42 #include "ac_llvm_build.h"
43 #include "ac_shader_abi.h"
44 #include "ac_shader_util.h"
45 #include "ac_exp_param.h"
#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)

struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	/* Streamout */
	LLVMValueRef streamout_buffers;
	LLVMValueRef streamout_write_idx;
	LLVMValueRef streamout_config;
	LLVMValueRef streamout_offset[4];

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
	uint64_t float16_shaded_mask;

	uint64_t input_mask;
	uint64_t output_mask;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex[4];
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
struct ac_build_if_state {
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}

	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = 32768;

	/* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
	 * threadgroup, even though there is more than 32 KiB LDS.
	 *
	 * Test: dEQP-VK.tessellation.shader_input_output.barrier
	 */
	if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
		hardware_lds_size = 65536;

	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == GFX6) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
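
/* Worked example (illustrative values, not taken from any particular app):
 * with 3 input control points, 4 output control points and 8 TCS inputs per
 * vertex:
 *
 *   num_patches       = 64 / MAX2(3, 4) * 4 = 64
 *   input_patch_size  = 3 * (8 * 16)        = 384 bytes
 *
 * Assuming 4 per-vertex outputs and 1 per-patch output,
 * output_patch_size = 4 * 64 + 16 = 272 bytes. With 64 KiB of LDS the LDS
 * limit is MIN2(64, 65536 / (384 + 272)) = MIN2(64, 99) = 64, so the final
 * clamp to 40 patches from the proprietary driver wins.
 */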
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
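
/* For illustration (hypothetical sizes, not derived from the code above):
 * with input_patch_size = 384 bytes and num_patches = 8, the TCS input
 * region occupies dwords [0, 8 * 384 / 4), so
 *
 *   get_tcs_out_patch0_offset()            = 8 * 384 / 4 = 768 dwords
 *   get_tcs_out_patch0_patch_data_offset() = 768 + pervertex_output_patch_size / 4
 *
 * and each subsequent patch advances by get_tcs_out_patch_stride() dwords.
 * All of the helpers below return dword offsets because ac_lds_load and
 * ac_lds_store address LDS in dword units.
 */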
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}
#define MAX_ARGS 64
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		LLVMValueRef P = LLVMGetParam(main_function, i);

		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
	uint8_t num_sgprs)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 1);

	locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
	uint8_t remaining_sgprs;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count++;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
static void allocate_inline_push_consts(struct radv_shader_context *ctx,
					struct user_sgpr_info *user_sgpr_info)
{
	uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;

	/* Only supported if shaders use push constants. */
	if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
		return;

	/* Only supported if shaders don't have indirect push constants. */
	if (ctx->shader_info->info.has_indirect_push_constants)
		return;

	/* Only supported for 32-bit push constants. */
	if (!ctx->shader_info->info.has_only_32bit_push_constants)
		return;

	uint8_t num_push_consts =
		(ctx->shader_info->info.max_push_constant_used -
		 ctx->shader_info->info.min_push_constant_used) / 4;

	/* Check if the number of user SGPRs is large enough. */
	if (num_push_consts < remaining_sgprs) {
		ctx->shader_info->info.num_inline_push_consts = num_push_consts;
	} else {
		ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
	}

	/* Clamp to the maximum number of allowed inlined push constants. */
	if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
		ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;

	if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
	    !ctx->shader_info->info.loads_dynamic_offsets) {
		/* Disable the default push constants path if all constants are
		 * inlined and if shaders don't use dynamic descriptors.
		 */
		ctx->shader_info->info.loads_push_constants = false;
	}

	ctx->shader_info->info.base_inline_push_consts =
		ctx->shader_info->info.min_push_constant_used / 4;
}
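
/* Worked example (illustrative): a shader that only reads bytes [16, 32) of
 * the push constant block has min_push_constant_used = 16 and
 * max_push_constant_used = 32, so num_push_consts = (32 - 16) / 4 = 4 dwords.
 * If at least 4 user SGPRs remain, all of them are inlined,
 * base_inline_push_consts becomes 16 / 4 = 4, and the pointer-based push
 * constant path is disabled entirely unless dynamic offsets are in use.
 */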
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count++;

	if (ctx->streamout_buffers)
		user_sgpr_count++;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
		user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
	} else {
		user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
	}

	allocate_inline_push_consts(ctx, user_sgpr_info);
}
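
/* Worked example (illustrative): a GFX9 vertex shader with spill support
 * (2 SGPRs), vertex buffers (1), base_vertex/start_instance (2), draw id (1)
 * and push constants (1) uses 7 of the 32 available user SGPRs. With 4
 * descriptor sets in use, remaining_sgprs = 32 - 7 - 4 = 21, so all sets stay
 * direct; only when the sets no longer fit does the driver fall back to a
 * single indirect pointer covering all of them.
 */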
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

		while (mask) {
			int i = u_bit_scan(&mask);

			add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
		}
	} else {
		add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
			desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
	}

	for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
		add_arg(args, ARG_SGPR, ctx->ac.i32,
			&ctx->abi.inline_push_consts[i]);
	}
	ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
	ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;

	if (ctx->shader_info->info.so.num_outputs) {
		add_arg(args, ARG_SGPR,
			ac_array_in_const32_addr_space(ctx->ac.v4i32),
			&ctx->streamout_buffers);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}
static void
declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
			struct arg_info *args)
{
	int i;

	/* Streamout SGPRs. */
	if (ctx->shader_info->info.so.num_outputs) {
		assert(stage == MESA_SHADER_VERTEX ||
		       stage == MESA_SHADER_TESS_EVAL);

		if (stage != MESA_SHADER_TESS_EVAL) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
		} else {
			args->assign[args->count - 1] = &ctx->streamout_config;
			args->types[args->count - 1] = ctx->ac.i32;
		}

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (i = 0; i < 4; i++) {
		if (!ctx->shader_info->info.so.strides[i])
			continue;

		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	uint32_t mask = ctx->shader_info->info.desc_set_used_mask;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		while (mask) {
			int i = u_bit_scan(&mask);

			set_loc_desc(ctx, i, user_sgpr_idx);
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));
		}

		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}

	if (ctx->shader_info->info.num_inline_push_consts) {
		set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
			       ctx->shader_info->info.num_inline_push_consts);
	}

	if (ctx->streamout_buffers) {
		set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
				   user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else if (ctx->options->key.vs.as_ls) {
			/* no extra parameters */
		} else {
			declare_streamout_sgprs(ctx, stage, &args);
		}

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			declare_streamout_sgprs(ctx, stage, &args);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
					   &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
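
/* For a simple non-merged vertex shader (no view index, not ES/LS), the
 * resulting "main" signature built above is, schematically:
 *
 *   sgprs: ring_offsets, per-set descriptor pointers..., push_constants,
 *          vertex_buffers, base_vertex, start_instance[, draw_id],
 *          streamout sgprs...
 *   vgprs: vertex_id, instance_id, vs_prim_id, <unused>
 *
 * This is only an illustration of the argument order established by the
 * add_arg() calls; the exact set depends on the shader info bits.
 */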
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}
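
/* Worked example (illustrative): for a binding with offset 64 and descriptor
 * size 32, the descriptor for array element `index` lives at byte offset
 * 64 + index * 32 within the set, which is exactly the
 * ac_build_imad(index, stride, offset) computed above. Dynamic buffers
 * instead index a 16-byte-per-slot region appended after the push constants.
 */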
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;
		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
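
/* The sample positions ring stores the 1x set first, then the 2x, 4x and 8x
 * sets back to back, so the first entry of each set sits at element
 * num_samples - 1: 0, 1, 3 and 7 respectively. The switch above encodes
 * that prefix sum (a description of the layout, assuming the ring is filled
 * in ascending sample-count order).
 */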
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->info.ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->shader_info->info.gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->gs_max_out_vertices, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    1, 1, true, true);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);
}
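/* Note on the GSVS ring addressing above: each enabled output component is
 * stored at dword index (slot * gs_max_out_vertices + gs_next_vertex), so
 * the ring is laid out component-major across the emitted vertices; the
 * final multiply by 4 converts that dword index into the byte offset that
 * ac_build_buffer_store_dword expects in voffset.
 */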
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}
static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}
static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi *abi,
	       LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (!write)
		LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}
		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);

		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}
static unsigned
get_num_channels_from_data_format(unsigned data_format)
{
	switch (data_format) {
	case V_008F0C_BUF_DATA_FORMAT_8:
	case V_008F0C_BUF_DATA_FORMAT_16:
	case V_008F0C_BUF_DATA_FORMAT_32:
		return 1;
	case V_008F0C_BUF_DATA_FORMAT_8_8:
	case V_008F0C_BUF_DATA_FORMAT_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32:
		return 2;
	case V_008F0C_BUF_DATA_FORMAT_10_11_11:
	case V_008F0C_BUF_DATA_FORMAT_11_11_10:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32:
		return 3;
	case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
	case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
	case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
	case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
	case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
		return 4;
	default:
		break;
	}

	return 4;
}
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}
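/* Note: the loop above fills the components the fetch did not supply with
 * the usual vertex-attribute defaults, (0, 0, 0, 1): zero for the missing
 * y/z channels and one for w, in float or integer form depending on the
 * attribute's number format.
 */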
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
				num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}

				if (ctx->options->key.vs.as_ls) {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_format_channels = get_num_channels_from_data_format(data_format);
		unsigned num_channels = MIN2(num_input_channels, num_format_channels);
		unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load, at least, 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		if (attrib_stride != 0 && attrib_offset > attrib_stride) {
			LLVMValueRef buffer_offset =
				LLVMConstInt(ctx->ac.i32,
					     attrib_offset / attrib_stride, false);

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    buffer_index,
						    buffer_offset, "");

			attrib_offset = attrib_offset % attrib_stride;
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
						     buffer_index,
						     LLVMConstInt(ctx->ac.i32, attrib_offset, false),
						     ctx->ac.i32_0, ctx->ac.i32_0,
						     num_channels,
						     data_format, num_format,
						     false, false, true);

		if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
			LLVMValueRef c[4];
			c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
			c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
			c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
			c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);

			input = ac_build_gather_values(&ctx->ac, c, 4);
		}

		input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
							is_float);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
			if (type == GLSL_TYPE_FLOAT16) {
				output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
				output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
			}
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			output[chan] = ac_to_integer(&ctx->ac, output[chan]);
			if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
				output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");

			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
		}
	}
}
static void interp_fs_input(struct radv_shader_context *ctx,
			    unsigned attr,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    bool float16,
			    LLVMValueRef result[4])
{
	LLVMValueRef attr_number;
	unsigned chan;
	LLVMValueRef i, j;
	bool interp = !LLVMIsUndef(interp_param);

	attr_number = LLVMConstInt(ctx->ac.i32, attr, false);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	if (interp) {
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						ctx->ac.v2f32, "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_1, "");
	}

	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);

		if (interp && float16) {
			result[chan] = ac_build_fs_interp_f16(&ctx->ac,
							      llvm_chan,
							      attr_number,
							      prim_mask, i, j);
		} else if (interp) {
			result[chan] = ac_build_fs_interp(&ctx->ac,
							  llvm_chan,
							  attr_number,
							  prim_mask, i, j);
		} else {
			result[chan] = ac_build_fs_interp_mov(&ctx->ac,
							      LLVMConstInt(ctx->ac.i32, 2, false),
							      llvm_chan,
							      attr_number,
							      prim_mask);
			result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
			result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], float16 ? ctx->ac.i16 : ctx->ac.i32, "");
		}
	}
}
static void mark_16bit_fs_input(struct radv_shader_context *ctx,
				const struct glsl_type *type,
				int location)
{
	if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		if (glsl_type_is_16bit(type)) {
			ctx->float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
		}
	} else if (glsl_type_is_array(type)) {
		unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
		for (unsigned i = 0; i < glsl_get_length(type); ++i) {
			mark_16bit_fs_input(ctx, glsl_get_array_element(type), location + i * stride);
		}
	} else {
		assert(glsl_type_is_struct_or_ifc(type));
		for (unsigned i = 0; i < glsl_get_length(type); i++) {
			mark_16bit_fs_input(ctx, glsl_get_struct_field(type, i), location);
			location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
		}
	}
}
static void
handle_fs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	int idx = variable->data.location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	LLVMValueRef interp = NULL;
	uint64_t mask;

	variable->data.driver_location = idx * 4;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	} else
		mark_16bit_fs_input(ctx, variable->type, idx);

	mask = ((1ull << attrib_count) - 1) << variable->data.location;

	if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT ||
	    glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT16 ||
	    glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_STRUCT) {
		unsigned interp_type;
		if (variable->data.sample)
			interp_type = INTERP_SAMPLE;
		else if (variable->data.centroid)
			interp_type = INTERP_CENTROID;
		else
			interp_type = INTERP_CENTER;

		interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
	}
	if (interp == NULL)
		interp = LLVMGetUndef(ctx->ac.i32);

	for (unsigned i = 0; i < attrib_count; ++i)
		ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;

	ctx->input_mask |= mask;
}
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}
static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}
static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index) {
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
		ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
	}

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs +ac_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			bool float16 = (ctx->float16_shaded_mask >> i) & 1;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask, float16,
					inputs);

			if (LLVMIsUndef(interp_param))
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			if (float16)
				ctx->shader_info->fs.float16_shaded_mask |= 1u << index;
			if (i >= VARYING_SLOT_VAR0)
				ctx->abi.fs_input_attr_indices[i - VARYING_SLOT_VAR0] = index;
			++index;
		} else if (i == VARYING_SLOT_CLIP_DIST0) {
			int length = ctx->shader_info->info.ps.num_input_clips_culls;

			for (unsigned j = 0; j < length; j += 4) {
				inputs = ctx->inputs + ac_llvm_reg_index_soa(i, j);

				interp_param = *inputs;
				interp_fs_input(ctx, index, interp_param,
						ctx->abi.prim_mask, false, inputs);
				++index;
			}
		} else if (i == VARYING_SLOT_POS) {
			for(int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (variable->data.compact) {
		unsigned component_count = variable->data.location_frac +
					   glsl_get_length(variable->type);
		attrib_count = (component_count + 3) / 4;
	}

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
			}
		}
	}

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (!values)
		return;

	bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
	if (ctx->stage == MESA_SHADER_FRAGMENT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			args->enabled_channels = 0x9;
			args->out[0] = values[0];
			args->out[3] = values[3];
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildFPExt(ctx->ac.builder,
								      values[chan],
								      ctx->ac.f32, "");
			}
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildZExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			if (is_16bit) {
				for (unsigned chan = 0; chan < 4; chan++)
					values[chan] = LLVMBuildSExt(ctx->ac.builder,
								     ac_to_integer(&ctx->ac, values[chan]),
								     ctx->ac.i32, "");
			}
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16/u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	if (is_16bit) {
		for (unsigned chan = 0; chan < 4; chan++) {
			values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
			args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		}
	} else
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i)
		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}
static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
radv_emit_stream_output(struct radv_shader_context *ctx,
			LLVMValueRef const *so_buffers,
			LLVMValueRef const *so_write_offsets,
			const struct radv_stream_output *output)
{
	unsigned num_comps = util_bitcount(output->component_mask);
	unsigned loc = output->location;
	unsigned buf = output->buffer;
	unsigned offset = output->offset;
	unsigned start;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Get the first component. */
	start = ffs(output->component_mask) - 1;

	/* Load the output as int. */
	for (int i = 0; i < num_comps; i++) {
		out[i] = ac_to_integer(&ctx->ac,
				       radv_load_output(ctx, loc, start + i));
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->ac.i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out,
					       !ac_has_vec3_support(ctx->ac.chip_class, false) ?
					       util_next_power_of_two(num_comps) :
					       num_comps);
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
				    vdata, num_comps, so_write_offsets[buf],
				    ctx->ac.i32_0, offset,
				    1, 1, true, false);
}
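/* Note: a 3-component output is padded with an undef lane and gathered to
 * the next power-of-two width when vec3 vectors are not supported; the
 * store count passed to ac_build_buffer_store_dword stays num_comps, so no
 * extra dwords are actually written to the streamout buffer.
 */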
static void
radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
{
	struct ac_build_if_state if_ctx;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	assert(ctx->streamout_config);
	LLVMValueRef so_vtx_count =
		ac_build_bfe(&ctx->ac, ctx->streamout_config,
			     LLVMConstInt(ctx->ac.i32, 16, false),
			     LLVMConstInt(ctx->ac.i32, 7, false), false);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
					      tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data.
	 */
	ac_nir_build_if(&if_ctx, ctx, can_emit);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */
		LLVMValueRef so_write_index = ctx->streamout_write_idx;

		/* Compute (streamout_write_index + thread_id). */
		so_write_index =
			LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer.
		 */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4] = {};
		LLVMValueRef buf_ptr = ctx->streamout_buffers;

		for (i = 0; i < 4; i++) {
			uint16_t stride = ctx->shader_info->info.so.strides[i];

			if (!stride)
				continue;

			LLVMValueRef offset =
				LLVMConstInt(ctx->ac.i32, i, false);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
							      buf_ptr, offset);

			LLVMValueRef so_offset = ctx->streamout_offset[i];

			so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
						 LLVMConstInt(ctx->ac.i32, 4, false), "");

			so_write_offset[i] =
				ac_build_imad(&ctx->ac, so_write_index,
					      LLVMConstInt(ctx->ac.i32,
							   stride * 4, false),
					      so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
			struct radv_stream_output *output =
				&ctx->shader_info->info.so.outputs[i];

			if (stream != output->stream)
				continue;

			radv_emit_stream_output(ctx, so_buffers,
						so_write_offset, output);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
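/* A worked example of the ByteOffset formula above: with a buffer stride of
 * 16 bytes (so.strides[] is in dwords, hence the stride * 4), a streamout
 * offset of 8 and write index w, thread t writes its first attribute at
 * 8*4 + (w + t)*16 bytes into the buffer; the per-output attrib_offset is
 * then added by the buffer store in radv_emit_stream_output.
 */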
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id, bool export_layer_id,
		       struct radv_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned target;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if(!*tmp_out) {
			for(unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}
		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	for(unsigned location = VARYING_SLOT_CLIP_DIST0; location <= VARYING_SLOT_CLIP_DIST1; ++location) {
		if (ctx->output_mask & (1ull << location)) {
			unsigned output_usage_mask, length;
			LLVMValueRef slots[4];
			unsigned j;

			if (ctx->stage == MESA_SHADER_VERTEX &&
			    !ctx->is_gs_copy_shader) {
				output_usage_mask =
					ctx->shader_info->info.vs.output_usage_mask[location];
			} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
				output_usage_mask =
					ctx->shader_info->info.tes.output_usage_mask[location];
			} else {
				assert(ctx->is_gs_copy_shader);
				output_usage_mask =
					ctx->shader_info->info.gs.output_usage_mask[location];
			}

			length = util_last_bit(output_usage_mask);

			for (j = 0; j < length; j++)
				slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, location, j));

			for (i = length; i < 4; i++)
				slots[i] = LLVMGetUndef(ctx->ac.f32);

			target = V_008DFC_SQ_EXP_POS + 2 + (location - VARYING_SLOT_CLIP_DIST0);
			si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));

			/* Export the clip/cull distances values to the next stage. */
			radv_export_param(ctx, param_count, &slots[0], 0xf);
			outinfo->vs_output_param_offset[location] = param_count++;
		}
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (ctx->shader_info->info.so.num_outputs &&
	    !ctx->is_gs_copy_shader) {
		/* The GS copy shader emission already emits streamout. */
		radv_emit_streamout(ctx, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;
		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;
		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}
	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;
		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i < VARYING_SLOT_VAR0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
							  ctx->shader_info->vs.vgpr_comp_cnt);
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	if (export_layer_id && layer_value) {
		LLVMValueRef values[4];

		values[0] = layer_value;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index, max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < 4; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    1, 1, true, true);
			}
		}
	}
}
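/* Note: on GFX9 the ES and GS stages run as one merged shader, so ES
 * outputs go through LDS at dword address
 * (wave_idx * 64 + thread_id) * itemsize_dw + param_index * 4 + component;
 * on earlier chips they are instead written to the ESGS ring buffer at
 * byte offset (4 * param_index + component) * 4 relative to es2gs_offset.
 */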
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < 4; j++) {
			LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			value = ac_to_integer(&ctx->ac, value);
			value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
			ac_lds_store(&ctx->ac, dw_addr, value);
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;

	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	// LINES reversal
	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= GFX8) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, 1, 0, true, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, 1, 0, true, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, 1, 0, true, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);

		param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_inner, 0));

		inner_vec = inner_comps == 1 ? inner[0] :
			ac_build_gather_values(&ctx->ac, inner, inner_comps);
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
					    inner_comps, tf_inner_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);
	}
	ac_nir_build_endif(&if_ctx);
}
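/* Note: on GFX8 and older the start of the tess-factor ring holds a dynamic
 * HS control word, so the shader for patch 0 writes 0x80000000 there and
 * every per-patch factor store is shifted by tf_offset (4 bytes); newer
 * chips write the factors directly at byte offset rel_patch_id * 4 * stride.
 */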
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}
static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}
static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
					       ctx->options->key.vs.export_layer_id,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
					       ctx->options->key.tes.export_layer_id,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls ||
		    ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= GFX8 &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring[0] =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_VS, false));
	}

	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		/* The conceptual layout of the GSVS ring is
		 *   v0c0 .. vLv0 v0c1 .. vLc1 ..
		 * but the real memory layout is swizzled across
		 * threads:
		 *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
		 *   t16v0c0 ..
		 * Override the buffer descriptor accordingly.
		 */
		LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
		uint64_t stream_offset = 0;
		unsigned num_records = 64;
		LLVMValueRef base_ring;

		base_ring =
			ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
					      LLVMConstInt(ctx->ac.i32,
							   RING_GSVS_GS, false));

		for (unsigned stream = 0; stream < 4; stream++) {
			unsigned num_components, stride;
			LLVMValueRef ring, tmp;

			num_components =
				ctx->shader_info->info.gs.num_stream_output_components[stream];

			if (!num_components)
				continue;

			stride = 4 * num_components * ctx->gs_max_out_vertices;

			/* Limit on the stride field for <= GFX7. */
			assert(stride < (1 << 14));

			ring = LLVMBuildBitCast(ctx->ac.builder,
						base_ring, v2i64, "");
			tmp = LLVMBuildExtractElement(ctx->ac.builder,
						      ring, ctx->ac.i32_0, "");
			tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
					   LLVMConstInt(ctx->ac.i64,
							stream_offset, 0), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder,
						      ring, tmp, ctx->ac.i32_0, "");

			stream_offset += stride * 64;

			ring = LLVMBuildBitCast(ctx->ac.builder, ring,
						ctx->ac.v4i32, "");

			tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
						      ctx->ac.i32_1, "");
			tmp = LLVMBuildOr(ctx->ac.builder, tmp,
					  LLVMConstInt(ctx->ac.i32,
						       S_008F04_STRIDE(stride), false), "");
			ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
						      ctx->ac.i32_1, "");

			ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
						      LLVMConstInt(ctx->ac.i32,
								   num_records, false),
						      LLVMConstInt(ctx->ac.i32, 2, false), "");

			ctx->gsvs_ring[stream] = ring;
		}
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
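/* Note on the descriptor surgery above: the 64-bit base address occupies
 * dwords 0-1 of the v4i32 buffer descriptor, so the per-stream byte offset
 * is added to it through a v2i64 view; the STRIDE field sits in dword 1 and
 * NUM_RECORDS in dword 2, with 64 records matching the 64 threads of a wave
 * in the swizzled layout.
 */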
static unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
				const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
				      nir->info.cs.local_size[1] *
				      nir->info.cs.local_size[2];
	return max_workgroup_size;
}
/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
							(i & 1) * 16, 16);
	}

	ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
}
3656 LLVMModuleRef
ac_translate_nir_to_llvm(struct ac_llvm_compiler
*ac_llvm
,
3657 struct nir_shader
*const *shaders
,
3659 struct radv_shader_variant_info
*shader_info
,
3660 const struct radv_nir_compiler_options
*options
)
3662 struct radv_shader_context ctx
= {0};
3664 ctx
.options
= options
;
3665 ctx
.shader_info
= shader_info
;
3667 ac_llvm_context_init(&ctx
.ac
, options
->chip_class
, options
->family
);
3668 ctx
.context
= ctx
.ac
.context
;
3669 ctx
.ac
.module
= ac_create_module(ac_llvm
->tm
, ctx
.context
);
3671 enum ac_float_mode float_mode
=
3672 options
->unsafe_math
? AC_FLOAT_MODE_UNSAFE_FP_MATH
:
3673 AC_FLOAT_MODE_DEFAULT
;
3675 ctx
.ac
.builder
= ac_create_builder(ctx
.context
, float_mode
);
3677 memset(shader_info
, 0, sizeof(*shader_info
));
3679 radv_nir_shader_info_init(&shader_info
->info
);
3681 for(int i
= 0; i
< shader_count
; ++i
)
3682 radv_nir_shader_info_pass(shaders
[i
], options
, &shader_info
->info
);
3684 for (i
= 0; i
< RADV_UD_MAX_SETS
; i
++)
3685 shader_info
->user_sgprs_locs
.descriptor_sets
[i
].sgpr_idx
= -1;
3686 for (i
= 0; i
< AC_UD_MAX_UD
; i
++)
3687 shader_info
->user_sgprs_locs
.shader_data
[i
].sgpr_idx
= -1;
3689 ctx
.max_workgroup_size
= 0;
3690 for (int i
= 0; i
< shader_count
; ++i
) {
3691 ctx
.max_workgroup_size
= MAX2(ctx
.max_workgroup_size
,
3692 radv_nir_get_max_workgroup_size(ctx
.options
->chip_class
,
3696 create_function(&ctx
, shaders
[shader_count
- 1]->info
.stage
, shader_count
>= 2,
3697 shader_count
>= 2 ? shaders
[shader_count
- 2]->info
.stage
: MESA_SHADER_VERTEX
);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;

	/* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
	 * we fall back to the old intrinsics for atomic buffer image operations
	 * and thus we need to apply the indexing workaround.
	 */
	ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;
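
	/* HAVE_LLVM encodes the LLVM version as 0xMMmm with the major version
	 * in the high byte, so the two checks above mean "LLVM older than
	 * 8.0" and "LLVM older than 9.0" respectively.
	 */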

	if (shader_count >= 2)
		ac_init_exec_full_mask(&ctx.ac);

	if ((ctx.ac.family == CHIP_VEGA10 ||
	     ctx.ac.family == CHIP_RAVEN) &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for (int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			for (int j = 0; j < 4; j++) {
				ctx.gs_next_vertex[j] =
					ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
			}
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			if (shader_info->info.vs.needs_instance_id) {
				if (ctx.options->key.vs.as_ls) {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
				}
			}
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (i)
			ac_emit_barrier(&ctx.ac, ctx.stage);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					   shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
						 shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}
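
		/* Each merged stage only runs on its own live threads: the
		 * count for stage i sits in 8-bit field i of merged_wave_info,
		 * and any thread with thread_id >= count branches straight to
		 * merge_block, skipping this stage's body.
		 */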
		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}

static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Set up the diagnostic handler. */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile IR. */
	if (!radv_compile_to_binary(ac_llvm, M, binary))
		retval = 1;
	return retval;
}
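
/* Note on the contract above: ac_llvm_compile() returns 0 on success. A
 * non-zero result comes either from radv_compile_to_binary() failing or
 * from ac_diagnostic_handler() writing through the retval pointer when
 * LLVM reports an error during compilation.
 */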

static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct ac_shader_binary *binary,
				   struct ac_shader_config *config,
				   struct radv_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   const struct radv_nir_compiler_options *options)
{
	if (options->dump_shader)
		ac_dump_module(llvm_module);

	memset(binary, 0, sizeof(*binary));

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		binary->llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = ac_llvm_compile(llvm_module, binary, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	if (options->dump_shader)
		fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);

	ac_shader_binary_read_config(binary, config, 0, options->supports_spill);

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

	if (stage == MESA_SHADER_FRAGMENT) {
		shader_info->num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
	}
	config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);
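
	/* Worked example: a fragment shader that only enables PERSP_CENTER
	 * and POS_FIXED_PT needs 2 + 1 = 3 input VGPRs; barycentric pairs
	 * cost two VGPRs each, PULL_MODEL costs three, and the remaining
	 * single-channel enables cost one each.
	 */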

	/* +3 for scratch wave offset and VCC */
	config->num_sgprs = MAX2(config->num_sgprs,
				 shader_info->num_input_sgprs + 3);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	config->float_mode |= V_00B028_FP_64_DENORMS;
}

static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info,
		    struct nir_shader *nir,
		    const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* In LS mode we need at least 1; invocation id needs 2, handled elsewhere. */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}

void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
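
/* On GFX9 the hardware merges the GS with the stage feeding it, so a
 * two-entry nir[] of {VS, GS} or {TES, GS} is compiled as one variant and
 * nir[0]'s stage is recorded as the ES type driving the geometry shader.
 */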

static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	LLVMValueRef stream_id;

	/* Fetch the vertex stream ID. */
	if (ctx->shader_info->info.so.num_outputs) {
		stream_id =
			ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
	} else {
		stream_id = ctx->ac.i32_0;
	}

	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
					       ctx->main_function, "end");
	switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);
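
	/* The GS copy shader runs as a VS that replays what the geometry
	 * shader wrote to the GSVS ring: one switch case per vertex stream
	 * (up to four), with stream 0 also driving the normal VS exports.
	 * Without streamout only stream 0 is reachable, since stream_id is
	 * forced to 0 above.
	 */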
	for (unsigned stream = 0; stream < 4; stream++) {
		unsigned num_components =
			ctx->shader_info->info.gs.num_stream_output_components[stream];
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!num_components)
			continue;

		if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);

		offset = 0;
		for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
			unsigned output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
			unsigned output_stream =
				ctx->shader_info->info.gs.output_streams[i];
			int length = util_last_bit(output_usage_mask);

			if (!(ctx->output_mask & (1ull << i)) ||
			    output_stream != stream)
				continue;

			for (unsigned j = 0; j < length; j++) {
				LLVMValueRef value, soffset;

				if (!(output_usage_mask & (1 << j)))
					continue;

				soffset = LLVMConstInt(ctx->ac.i32,
						       offset *
						       ctx->gs_max_out_vertices * 16 * 4, false);

				offset++;

				value = ac_build_buffer_load(&ctx->ac,
							     ctx->gsvs_ring[0],
							     1, ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, 1, 1, true, false);

				LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
				if (ac_get_type_size(type) == 2) {
					value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
					value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
				}

				LLVMBuildStore(ctx->ac.builder,
					       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			}
		}

		if (ctx->shader_info->info.so.num_outputs)
			radv_emit_streamout(ctx, stream);

		if (stream == 0) {
			handle_vs_outputs_post(ctx, false, false,
					       &ctx->shader_info->vs.outinfo);
		}

		LLVMBuildBr(ctx->ac.builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}

void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}