/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#if HAVE_LLVM >= 0x0700
#include <llvm-c/Transforms/Utils.h>
#endif

#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring;
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t output_mask;

	uint8_t num_output_clips;
	uint8_t num_output_culls;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex;
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};
enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};
static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}
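
/* Illustrative note: container_of() recovers the enclosing object from a
 * pointer to one of its members. If the abi member sits at byte offset N
 * inside struct radv_shader_context, the call above evaluates to
 * (struct radv_shader_context *)((char *)abi - N); the NULL ctx is only
 * used to compute that offset, never dereferenced.
 */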
struct ac_build_if_state {
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};
static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check if there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	} else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}
static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	} else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
static LLVMValueRef
get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}
static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
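
/* Worked example with assumed inputs (3 input CPs, 4 output CPs, 8 TCS
 * inputs, 6 per-vertex + 2 per-patch TCS outputs, chip_class >= CIK):
 *
 *   input_patch_size  = 3 * 8 * 16          = 384 bytes
 *   output_patch_size = 4 * 6 * 16 + 2 * 16 = 416 bytes
 *
 *   one-wave bound:  64 / MAX2(3, 4) * 4  = 64 patches
 *   LDS bound:       65536 / (384 + 416)  = 81 patches
 *   perf clamp:                             40 patches
 *
 * so get_tcs_num_patches() returns 40 here (the off-chip bound, which
 * depends on tess_offchip_block_dw_size, is left out of this example).
 */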
static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0            = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0  = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2            = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2  = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
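
/* Concrete offsets under the layout above, with assumed sizes
 * input_patch_size = 384 bytes, output_patch_size = 416 bytes and
 * num_patches = 8: TCS inputs occupy bytes [0, 3072), patch 0 outputs
 * start at byte 3072 and patch i outputs at 3072 + i * 416. The helpers
 * below return these offsets divided by 4, i.e. in dwords.
 */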
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}
static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}
static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}
static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}
static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""), "");
}
static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""), "");
}
#define MAX_ARGS 23

struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};
static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
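
/* Example use (names assumed for illustration): a 32-bit scalar and a
 * 4-dword descriptor both land in SGPRs, but advance num_sgprs_used by
 * ac_get_type_size() / 4 registers each:
 *
 *   add_arg(&info, ARG_SGPR, ctx->ac.i32,   &dst);  // +1 SGPR
 *   add_arg(&info, ARG_SGPR, ctx->ac.v4i32, &desc); // +4 SGPRs
 */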
static void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}
static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}
static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}
	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}
static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
	uint32_t indirect_offset)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect_offset > 0;
	ud_info->indirect_offset = indirect_offset;
	*sgpr_idx += num_sgprs;
}
static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];

	set_loc(ud_info, sgpr_idx, num_sgprs, 0);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
				  idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     uint32_t indirect_offset)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];

	set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect_offset);
	if (indirect_offset == 0)
		locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
};
static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}
static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count += HAVE_32BIT_POINTERS ? 1 : 2;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
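
/* Example: with 32-bit pointers, vertex buffers present and draw-id
 * needed, this counts 1 (vertex-buffer pointer) + 3 (base vertex, start
 * instance, draw id) = 4 user SGPRs.
 */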
static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
	}
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}
}
static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}
static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}
static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, 0);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));
			} else
				ctx->descriptor_sets[i] = NULL;
		}
		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}
}
static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}
static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}
static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);
		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);
			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
	 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
			      &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
	index = LLVMBuildMul(ctx->ac.builder, index, stride, "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
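
/* Worked example for the dynamic-descriptor path (numbers assumed): with
 * push_constant_size = 64 and dynamic_offset index idx = 2, the 16-byte
 * descriptor lives in the push-constant buffer at base_offset =
 * 64 + 16 * 2 = 96, plus index * 16 for arrayed bindings.
 */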
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
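
/* Worked address example (values assumed): with 4 vertices per patch,
 * get_tcs_tes_buffer_address() below computes for per-vertex attributes
 *
 *   addr = (rel_patch_id * 4 + vertex_index + param_index * param_stride) * 16
 *
 * so patch 1, vertex 2, attribute 0 starts at (1 * 4 + 2) * 16 = 96 bytes.
 */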
static LLVMValueRef
get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}
static LLVMValueRef
calc_param_stride(struct radv_shader_context *ctx,
		  LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}
static LLVMValueRef
get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
			   LLVMValueRef vertex_index,
			   LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
static LLVMValueRef
get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
				  unsigned param,
				  unsigned const_index,
				  bool is_compact,
				  LLVMValueRef vertex_index,
				  LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}

	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index, stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}
static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if (location == VARYING_SLOT_CLIP_DIST0 &&
	    is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}
static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}
static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);

			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i],
						    type, "");
		}
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}
static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}
static LLVMValueRef
lookup_interp_param(struct ac_shader_abi *abi,
		    enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}
static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	case 16:
		sample_pos_offset = 15;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
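
/* The offsets above index the shared sample-positions ring: entry 0 holds
 * the 1x position, entries [1,2] the 2x positions, [3,6] the 4x positions,
 * [7,14] the 8x positions and [15,30] the 16x positions, hence the offset
 * for N samples is N - 1.
 */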
static LLVMValueRef
load_sample_position(struct ac_shader_abi *abi,
		     LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}
static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->shader_info->info.ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");

	return result;
}
static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	int idx;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	assert(stream == 0);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex,
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	/* loop num outputs */
	idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = 4;
		int slot = idx;
		int slot_inc = 1;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* pack clip and cull into a single set of slots */
			length = ctx->num_output_clips + ctx->num_output_culls;
			if (length > 4)
				slot_inc = 2;
			output_usage_mask = (1 << length) - 1;
		}

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    1, 1, true, true);
		}
		idx += slot_inc;
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
}
static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}
static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}
static LLVMValueRef
radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}
static LLVMValueRef
radv_load_ssbo(struct ac_shader_abi *abi,
	       LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}
static LLVMValueRef
radv_get_sampler_desc(struct ac_shader_abi *abi,
		      unsigned descriptor_set,
		      unsigned base_index,
		      unsigned constant_index,
		      LLVMValueRef index,
		      enum ac_descriptor_type desc_type,
		      bool image, bool write,
		      bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 64;

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
			  unsigned adjustment,
			  LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
			     adjustment == RADV_ALPHA_ADJUST_SNORM ?
			     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return alpha;
}
static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ctx->vertex_buffers;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
	unsigned num_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;

		if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.instance_id,
							    ctx->abi.start_instance, "");

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
								     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}

				if (ctx->options->key.vs.as_ls) {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx->shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}
		} else
			buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
						    ctx->abi.base_vertex, "");
		t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);

		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		input = ac_build_buffer_load_format(&ctx->ac, t_list,
						    buffer_index,
						    ctx->ac.i32_0,
						    num_channels, false, true);

		input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);

		for (unsigned chan = 0; chan < 4; chan++) {
			LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
			output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
		}

		unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
		output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);

		for (unsigned chan = 0; chan < 4; chan++) {
			ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] =
				ac_to_integer(&ctx->ac, output[chan]);
		}
	}
}
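/* Interpolate one FS input attribute: with valid i/j barycentrics this
 * emits fs.interp; for flat-shaded (integer) inputs interp_param is NULL
 * and fs.constant fetches the unmodified attribute instead. */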
static void interp_fs_input(struct radv_shader_context *ctx,
			    unsigned attr,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef result[4])
{
	LLVMValueRef attr_number;
	unsigned chan;
	LLVMValueRef i, j;
	bool interp = interp_param != NULL;

	attr_number = LLVMConstInt(ctx->ac.i32, attr, false);

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 */
	if (interp) {
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						ctx->ac.v2f32, "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->ac.i32_1, "");
	}

	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);

		if (interp) {
			result[chan] = ac_build_fs_interp(&ctx->ac,
							  llvm_chan,
							  attr_number,
							  prim_mask, i, j);
		} else {
			result[chan] = ac_build_fs_interp_mov(&ctx->ac,
							      LLVMConstInt(ctx->ac.i32, 2, false),
							      llvm_chan,
							      attr_number,
							      prim_mask);
		}
	}
}
static void
handle_fs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	int idx = variable->data.location;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	LLVMValueRef interp;

	variable->data.driver_location = idx * 4;
	ctx->input_mask |= ((1ull << attrib_count) - 1) << variable->data.location;

	if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
		unsigned interp_type;
		if (variable->data.sample)
			interp_type = INTERP_SAMPLE;
		else if (variable->data.centroid)
			interp_type = INTERP_CENTROID;
		else
			interp_type = INTERP_CENTER;

		interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
	} else
		interp = NULL;

	for (unsigned i = 0; i < attrib_count; ++i)
		ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
}
static void
handle_vs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir) {
	nir_foreach_variable(variable, &nir->inputs)
		handle_vs_input_decl(ctx, variable);
}
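/* If a shader mixes center and centroid interpolation, substitute the
 * center i/j for the centroid i/j whenever the sign bit of prim_mask is
 * set; that bit acts as a runtime switch that lets the driver collapse
 * centroid to center without compiling a second shader variant. */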
static void
prepare_interp_optimize(struct radv_shader_context *ctx,
			struct nir_shader *nir)
{
	bool uses_center = false;
	bool uses_centroid = false;
	nir_foreach_variable(variable, &nir->inputs) {
		if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
		    variable->data.sample)
			continue;

		if (variable->data.centroid)
			uses_centroid = true;
		else
			uses_center = true;
	}

	if (uses_center && uses_centroid) {
		LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
		ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
		ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
	}
}
static void
handle_fs_inputs(struct radv_shader_context *ctx,
		 struct nir_shader *nir)
{
	prepare_interp_optimize(ctx, nir);

	nir_foreach_variable(variable, &nir->inputs)
		handle_fs_input_decl(ctx, variable);

	unsigned index = 0;

	if (ctx->shader_info->info.ps.uses_input_attachments ||
	    ctx->shader_info->info.needs_multiview_view_index)
		ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;

	for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
		LLVMValueRef interp_param;
		LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);

		if (!(ctx->input_mask & (1ull << i)))
			continue;

		if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
		    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
			interp_param = *inputs;
			interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
					inputs);

			if (!interp_param)
				ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
			++index;
		} else if (i == VARYING_SLOT_POS) {
			for(int i = 0; i < 3; ++i)
				inputs[i] = ctx->abi.frag_pos[i];

			inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
						  ctx->abi.frag_pos[3]);
		}
	}
	ctx->shader_info->fs.num_interp = index;
	ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;

	if (ctx->shader_info->info.needs_multiview_view_index)
		ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}
static void
scan_shader_output_decl(struct radv_shader_context *ctx,
			struct nir_variable *variable,
			struct nir_shader *shader,
			gl_shader_stage stage)
{
	int idx = variable->data.location + variable->data.index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
	uint64_t mask_attribs;

	variable->data.driver_location = idx * 4;

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	mask_attribs = ((1ull << attrib_count) - 1) << idx;
	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = shader->info.clip_distance_array_size +
			             shader->info.cull_distance_array_size;
			if (stage == MESA_SHADER_VERTEX) {
				ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
			}
			if (stage == MESA_SHADER_TESS_EVAL) {
				ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
				ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
			}

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
			mask_attribs = 1ull << idx;
		}
	}

	ctx->output_mask |= mask_attribs;
}
/* Initialize arguments for the shader export intrinsic */
static void
si_llvm_init_export_args(struct radv_shader_context *ctx,
			 LLVMValueRef *values,
			 unsigned enabled_channels,
			 unsigned target,
			 struct ac_export_args *args)
{
	/* Specify the channels that are enabled. */
	args->enabled_channels = enabled_channels;

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	args->compr = false;
	args->out[0] = LLVMGetUndef(ctx->ac.f32);
	args->out[1] = LLVMGetUndef(ctx->ac.f32);
	args->out[2] = LLVMGetUndef(ctx->ac.f32);
	args->out[3] = LLVMGetUndef(ctx->ac.f32);

	if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
		unsigned index = target - V_008DFC_SQ_EXP_MRT;
		unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
		bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
		bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
		unsigned chan;

		LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
		LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
				      unsigned bits, bool hi) = NULL;

		switch(col_format) {
		case V_028714_SPI_SHADER_ZERO:
			args->enabled_channels = 0; /* writemask */
			args->target = V_008DFC_SQ_EXP_NULL;
			break;

		case V_028714_SPI_SHADER_32_R:
			args->enabled_channels = 1;
			args->out[0] = values[0];
			break;

		case V_028714_SPI_SHADER_32_GR:
			args->enabled_channels = 0x3;
			args->out[0] = values[0];
			args->out[1] = values[1];
			break;

		case V_028714_SPI_SHADER_32_AR:
			args->enabled_channels = 0x9;
			args->out[0] = values[0];
			args->out[3] = values[3];
			break;

		case V_028714_SPI_SHADER_FP16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pkrtz_f16;
			break;

		case V_028714_SPI_SHADER_UNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_u16;
			break;

		case V_028714_SPI_SHADER_SNORM16_ABGR:
			args->enabled_channels = 0x5;
			packf = ac_build_cvt_pknorm_i16;
			break;

		case V_028714_SPI_SHADER_UINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_u16;
			break;

		case V_028714_SPI_SHADER_SINT16_ABGR:
			args->enabled_channels = 0x5;
			packi = ac_build_cvt_pk_i16;
			break;

		default:
		case V_028714_SPI_SHADER_32_ABGR:
			memcpy(&args->out[0], values, sizeof(values[0]) * 4);
			break;
		}

		/* Pack f16 or norm_i16/u16. */
		if (packf) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					values[2 * chan],
					values[2 * chan + 1]
				};
				LLVMValueRef packed;

				packed = packf(&ctx->ac, pack_args);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}

		/* Pack i16 or u16. */
		if (packi) {
			for (chan = 0; chan < 2; chan++) {
				LLVMValueRef pack_args[2] = {
					ac_to_integer(&ctx->ac, values[2 * chan]),
					ac_to_integer(&ctx->ac, values[2 * chan + 1])
				};
				LLVMValueRef packed;

				packed = packi(&ctx->ac, pack_args,
					       is_int8 ? 8 : is_int10 ? 10 : 16,
					       chan == 1);
				args->out[chan] = ac_to_float(&ctx->ac, packed);
			}
			args->compr = 1; /* COMPR flag */
		}
		return;
	}

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);

	for (unsigned i = 0; i < 4; ++i) {
		if (!(args->enabled_channels & (1 << i)))
			continue;

		args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
	}
}
static void
radv_export_param(struct radv_shader_context *ctx, unsigned index,
		  LLVMValueRef *values, unsigned enabled_channels)
{
	struct ac_export_args args;

	si_llvm_init_export_args(ctx, values, enabled_channels,
				 V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}
static LLVMValueRef
radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
{
	LLVMValueRef output =
		ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];

	return LLVMBuildLoad(ctx->ac.builder, output, "");
}
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
		       bool export_prim_id, bool export_layer_id,
		       struct radv_vs_output_info *outinfo)
{
	uint32_t param_count = 0;
	unsigned target;
	unsigned pos_idx, num_pos_exports = 0;
	struct ac_export_args args, pos_args[4] = {};
	LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	int i;

	if (ctx->options->key.has_multiview_view_index) {
		LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		if (!*tmp_out) {
			for(unsigned i = 0; i < 4; ++i)
				ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
					ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
		}

		LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
		ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
	}

	memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(outinfo->vs_output_param_offset));

	if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
		LLVMValueRef slots[8];
		unsigned j;

		if (outinfo->cull_dist_mask)
			outinfo->cull_dist_mask <<= ctx->num_output_clips;

		i = VARYING_SLOT_CLIP_DIST0;
		for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
			slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
			slots[i] = LLVMGetUndef(ctx->ac.f32);

		if (ctx->num_output_clips + ctx->num_output_culls > 4) {
			target = V_008DFC_SQ_EXP_POS + 3;
			si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
			       &args, sizeof(args));
		}

		target = V_008DFC_SQ_EXP_POS + 2;
		si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
		memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
		       &args, sizeof(args));
	}

	LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
	if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
		for (unsigned j = 0; j < 4; j++)
			pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
	}
	si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);

	if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
		outinfo->writes_pointsize = true;
		psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
		outinfo->writes_layer = true;
		layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
	}

	if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
		outinfo->writes_viewport_index = true;
		viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
	}

	if (outinfo->writes_pointsize ||
	    outinfo->writes_layer ||
	    outinfo->writes_viewport_index) {
		pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
						(outinfo->writes_layer == true ? 4 : 0));
		pos_args[1].valid_mask = 0;
		pos_args[1].done = 0;
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0;
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (outinfo->writes_pointsize == true)
			pos_args[1].out[0] = psize_value;

		if (outinfo->writes_layer == true)
			pos_args[1].out[2] = layer_value;

		if (outinfo->writes_viewport_index == true) {
			if (ctx->options->chip_class >= GFX9) {
				/* GFX9 has the layer in out.z[10:0] and the viewport
				 * index in out.z[19:16].
				 */
				LLVMValueRef v = viewport_index_value;
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->ac.i32, 16, false),
						 "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");

				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			} else {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++) {
		if (pos_args[i].out[0])
			num_pos_exports++;
	}

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
		if (pos_idx == num_pos_exports)
			pos_args[i].done = 1;
		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i != VARYING_SLOT_LAYER &&
		    i != VARYING_SLOT_PRIMITIVE_ID &&
		    i < VARYING_SLOT_VAR0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));

		unsigned output_usage_mask;

		if (ctx->stage == MESA_SHADER_VERTEX &&
		    !ctx->is_gs_copy_shader) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		} else {
			assert(ctx->is_gs_copy_shader);
			output_usage_mask =
				ctx->shader_info->info.gs.output_usage_mask[i];
		}

		radv_export_param(ctx, param_count, values, output_usage_mask);

		outinfo->vs_output_param_offset[i] = param_count++;
	}

	if (export_prim_id) {
		LLVMValueRef values[4];

		values[0] = ctx->vs_prim_id;
		ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
							  ctx->shader_info->vs.vgpr_comp_cnt);
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
		outinfo->export_prim_id = true;
	}

	if (export_layer_id && layer_value) {
		LLVMValueRef values[4];

		values[0] = layer_value;
		for (unsigned j = 1; j < 4; j++)
			values[j] = ctx->ac.f32_0;

		radv_export_param(ctx, param_count, values, 0x1);

		outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
	}

	outinfo->pos_exports = num_pos_exports;
	outinfo->param_exports = param_count;
}
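/* ES (VS or TES feeding a GS) output path: on GFX9 the merged ES/GS stage
 * passes outputs through LDS; on older chips they are written to the
 * ESGS ring buffer instead. */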
static void
handle_es_outputs_post(struct radv_shader_context *ctx,
		       struct radv_es_output_info *outinfo)
{
	int j;
	uint64_t max_output_written = 0;
	LLVMValueRef lds_base = NULL;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = ctx->num_output_clips + ctx->num_output_culls;

		param_index = shader_io_get_unique_index(i);

		max_output_written = MAX2(param_index + (length > 4), max_output_written);
	}

	outinfo->esgs_itemsize = (max_output_written + 1) * 16;

	if (ctx->ac.chip_class >= GFX9) {
		unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
						     LLVMConstInt(ctx->ac.i32, 24, false),
						     LLVMConstInt(ctx->ac.i32, 4, false), false);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
	}

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef dw_addr = NULL;
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		unsigned output_usage_mask;
		int param_index;
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (ctx->stage == MESA_SHADER_VERTEX) {
			output_usage_mask =
				ctx->shader_info->info.vs.output_usage_mask[i];
		} else {
			assert(ctx->stage == MESA_SHADER_TESS_EVAL);
			output_usage_mask =
				ctx->shader_info->info.tes.output_usage_mask[i];
		}

		if (i == VARYING_SLOT_CLIP_DIST0) {
			length = ctx->num_output_clips + ctx->num_output_culls;
			output_usage_mask = (1 << length) - 1;
		}

		param_index = shader_io_get_unique_index(i);

		if (lds_base) {
			dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
					       LLVMConstInt(ctx->ac.i32, param_index * 4, false),
					       "");
		}

		for (j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
			out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			if (ctx->ac.chip_class >= GFX9) {
				LLVMValueRef dw_addr_offset =
					LLVMBuildAdd(ctx->ac.builder, dw_addr,
						     LLVMConstInt(ctx->ac.i32,
								  j, false), "");

				ac_lds_store(&ctx->ac, dw_addr_offset,
					     LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
			} else {
				ac_build_buffer_store_dword(&ctx->ac,
							    ctx->esgs_ring,
							    out_val, 1,
							    NULL, ctx->es2gs_offset,
							    (4 * param_index + j) * 4,
							    1, 1, true, true);
			}
		}
	}
}
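/* LS (VS feeding tessellation) output path: outputs are stored to LDS with
 * a per-vertex dword stride derived from the number of written LS outputs,
 * where the TCS will read them back as inputs. */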
static void
handle_ls_outputs_post(struct radv_shader_context *ctx)
{
	LLVMValueRef vertex_id = ctx->rel_auto_id;
	uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
	LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
		int length = 4;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0)
			length = ctx->num_output_clips + ctx->num_output_culls;
		int param = shader_io_get_unique_index(i);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
						    LLVMConstInt(ctx->ac.i32, param * 4, false),
						    "");
		for (unsigned j = 0; j < length; j++) {
			ac_lds_store(&ctx->ac, dw_addr,
				     LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
		}
	}
}
static void
write_tess_factors(struct radv_shader_context *ctx)
{
	unsigned stride, outer_comps, inner_comps;
	struct ac_build_if_state if_ctx, inner_if_ctx;
	LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
	LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	unsigned tess_inner_index = 0, tess_outer_index;
	LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	int i;
	ac_emit_barrier(&ctx->ac, ctx->stage);

	switch (ctx->options->key.tcs.primitive_mode) {
	case GL_ISOLINES:
		stride = 2;
		outer_comps = 2;
		inner_comps = 0;
		break;
	case GL_TRIANGLES:
		stride = 4;
		outer_comps = 3;
		inner_comps = 1;
		break;
	case GL_QUADS:
		stride = 6;
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		return;
	}

	ac_nir_build_if(&if_ctx, ctx,
			LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				      invocation_id, ctx->ac.i32_0, ""));

	lds_base = get_tcs_out_current_patch_data_offset(ctx);

	if (inner_comps) {
		tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
	}

	tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
	lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
				 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");

	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->ac.i32);
		outer[i] = LLVMGetUndef(ctx->ac.i32);
	}

	// LINES reversal
	if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
		outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
					 ctx->ac.i32_1, "");
		outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
	} else {
		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				ac_lds_load(&ctx->ac, lds_outer);
			lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
						 ctx->ac.i32_1, "");
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps + i] =
				ac_lds_load(&ctx->ac, lds_inner);
			lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
						 ctx->ac.i32_1, "");
		}
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);

	buffer = ctx->hs_ring_tess_factor;
	tf_base = ctx->tess_factor_offset;
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
	unsigned tf_offset = 0;

	if (ctx->options->chip_class <= VI) {
		ac_nir_build_if(&inner_if_ctx, ctx,
				LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
					      rel_patch_id, ctx->ac.i32_0, ""));

		/* Store the dynamic HS control word. */
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->ac.i32, 0x80000000, false),
					    1, ctx->ac.i32_0, tf_base,
					    0, 1, 0, true, false);
		tf_offset += 4;

		ac_nir_build_endif(&inner_if_ctx);
	}

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    tf_offset, 1, 0, true, false);
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    16 + tf_offset, 1, 0, true, false);

	//store to offchip for TES to read - only if TES reads them
	if (ctx->options->key.tcs.tes_reads_tess_factors) {
		LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
							     LLVMConstInt(ctx->ac.i32, param_outer, 0));

		outer_vec = ac_build_gather_values(&ctx->ac, outer,
						   util_next_power_of_two(outer_comps));

		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
					    outer_comps, tf_outer_offset,
					    ctx->oc_lds, 0, 1, 0, true, false);
		if (inner_comps) {
			param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
								     LLVMConstInt(ctx->ac.i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
						    inner_comps, tf_inner_offset,
						    ctx->oc_lds, 0, 1, 0, true, false);
		}
	}
	ac_nir_build_endif(&if_ctx);
}
static void
handle_tcs_outputs_post(struct radv_shader_context *ctx)
{
	write_tess_factors(ctx);
}
static bool
si_export_mrt_color(struct radv_shader_context *ctx,
		    LLVMValueRef *color, unsigned index,
		    struct ac_export_args *args)
{
	/* Export */
	si_llvm_init_export_args(ctx, color, 0xf,
				 V_008DFC_SQ_EXP_MRT + index, args);
	if (!args->enabled_channels)
		return false; /* unnecessary NULL export */

	return true;
}
static void
radv_export_mrt_z(struct radv_shader_context *ctx,
		  LLVMValueRef depth, LLVMValueRef stencil,
		  LLVMValueRef samplemask)
{
	struct ac_export_args args;

	ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);

	ac_build_export(&ctx->ac, &args);
}
static void
handle_fs_outputs_post(struct radv_shader_context *ctx)
{
	unsigned index = 0;
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	struct ac_export_args color_args[8];

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef values[4];

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i < FRAG_RESULT_DATA0)
			continue;

		for (unsigned j = 0; j < 4; j++)
			values[j] = ac_to_float(&ctx->ac,
						radv_load_output(ctx, i, j));

		bool ret = si_export_mrt_color(ctx, values,
					       i - FRAG_RESULT_DATA0,
					       &color_args[index]);
		if (ret)
			index++;
	}

	/* Process depth, stencil, samplemask. */
	if (ctx->shader_info->info.ps.writes_z) {
		depth = ac_to_float(&ctx->ac,
				    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
	}
	if (ctx->shader_info->info.ps.writes_stencil) {
		stencil = ac_to_float(&ctx->ac,
				      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
	}
	if (ctx->shader_info->info.ps.writes_sample_mask) {
		samplemask = ac_to_float(&ctx->ac,
					 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
	}

	/* Set the DONE bit on last non-null color export only if Z isn't
	 * exported.
	 */
	if (index > 0 &&
	    !ctx->shader_info->info.ps.writes_z &&
	    !ctx->shader_info->info.ps.writes_stencil &&
	    !ctx->shader_info->info.ps.writes_sample_mask) {
		unsigned last = index - 1;

		color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
		color_args[last].done = 1; /* DONE bit */
	}

	/* Export PS outputs. */
	for (unsigned i = 0; i < index; i++)
		ac_build_export(&ctx->ac, &color_args[i]);

	if (depth || stencil || samplemask)
		radv_export_mrt_z(ctx, depth, stencil, samplemask);
	else if (!index)
		ac_build_export_null(&ctx->ac);
}
static void
emit_gs_epilogue(struct radv_shader_context *ctx)
{
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
static void
handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
			   LLVMValueRef *addrs)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (ctx->stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls)
			handle_ls_outputs_post(ctx);
		else if (ctx->options->key.vs.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
					       ctx->options->key.vs.export_layer_id,
					       &ctx->shader_info->vs.outinfo);
		break;
	case MESA_SHADER_FRAGMENT:
		handle_fs_outputs_post(ctx);
		break;
	case MESA_SHADER_GEOMETRY:
		emit_gs_epilogue(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		handle_tcs_outputs_post(ctx);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
		else
			handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
					       ctx->options->key.tes.export_layer_id,
					       &ctx->shader_info->tes.outinfo);
		break;
	default:
		break;
	}
}
static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
				    LLVMPassManagerRef passmgr,
				    const struct radv_nir_compiler_options *options)
{
	LLVMRunPassManager(passmgr, ctx->ac.module);
	LLVMDisposeBuilder(ctx->ac.builder);

	ac_llvm_context_dispose(&ctx->ac);
}
static void
ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
{
	struct radv_vs_output_info *outinfo;

	switch (ctx->stage) {
	case MESA_SHADER_FRAGMENT:
	case MESA_SHADER_COMPUTE:
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_GEOMETRY:
		return;
	case MESA_SHADER_VERTEX:
		if (ctx->options->key.vs.as_ls ||
		    ctx->options->key.vs.as_es)
			return;
		outinfo = &ctx->shader_info->vs.outinfo;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->options->key.tes.as_es)
			return;
		outinfo = &ctx->shader_info->tes.outinfo;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	ac_optimize_vs_outputs(&ctx->ac,
			       ctx->main_function,
			       outinfo->vs_output_param_offset,
			       VARYING_SLOT_MAX,
			       &outinfo->param_exports);
}
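/* Load the ESGS/GSVS and tessellation ring descriptors from the
 * ring_offsets array, patching the GSVS descriptor's stride and size
 * fields to match this shader's maximum emit size. */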
static void
ac_setup_rings(struct radv_shader_context *ctx)
{
	if (ctx->options->chip_class <= VI &&
	    (ctx->stage == MESA_SHADER_GEOMETRY ||
	     ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
		unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
								   : RING_ESGS_VS;
		LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);

		ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
						       ctx->ring_offsets,
						       offset);
	}

	if (ctx->is_gs_copy_shader) {
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
	}
	if (ctx->stage == MESA_SHADER_GEOMETRY) {
		LLVMValueRef tmp;
		uint32_t num_entries = 64;
		LLVMValueRef gsvs_ring_stride = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size, false);
		LLVMValueRef gsvs_ring_desc = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size << 16, false);
		ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));

		ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");

		tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
		if (ctx->options->chip_class >= VI)
			tmp = LLVMBuildMul(ctx->ac.builder, gsvs_ring_stride, tmp, "");
		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
		tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
		tmp = LLVMBuildOr(ctx->ac.builder, tmp, gsvs_ring_desc, "");
		ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
	}

	if (ctx->stage == MESA_SHADER_TESS_CTRL ||
	    ctx->stage == MESA_SHADER_TESS_EVAL) {
		ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
		ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
	}
}
static unsigned
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
			      const struct nir_shader *nir)
{
	switch (nir->info.stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= CIK ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
		nir->info.cs.local_size[1] *
		nir->info.cs.local_size[2];
	return max_workgroup_size;
}
/* Fixup the HW not emitting the TCS regs if there are no HS threads. */
static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
{
	LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
					  LLVMConstInt(ctx->ac.i32, 8, false),
					  LLVMConstInt(ctx->ac.i32, 8, false), false);
	LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
					      ctx->ac.i32_0, "");
	ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
	ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
	ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
}
static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
{
	for(int i = 5; i >= 0; --i) {
		ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
						     LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
						     LLVMConstInt(ctx->ac.i32, 16, false), false);
	}

	ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
				       LLVMConstInt(ctx->ac.i32, 16, false),
				       LLVMConstInt(ctx->ac.i32, 8, false), false);
}
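/* Main entry point: translate one or more (possibly merged) NIR stages into
 * a single LLVM module, wiring up the radv ABI callbacks per stage and, for
 * merged stages, guarding each stage body by a per-wave thread-count check
 * derived from merged_wave_info. */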
static
LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                       struct nir_shader *const *shaders,
                                       int shader_count,
                                       struct radv_shader_variant_info *shader_info,
                                       const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	unsigned i;
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);

	memset(shader_info, 0, sizeof(*shader_info));

	for(int i = 0; i < shader_count; ++i)
		radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);

	for (i = 0; i < RADV_UD_MAX_SETS; i++)
		shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
	for (i = 0; i < AC_UD_MAX_UD; i++)
		shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;

	ctx.max_workgroup_size = 0;
	for (int i = 0; i < shader_count; ++i) {
		ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
		                              ac_nir_get_max_workgroup_size(ctx.options->chip_class,
		                                                            shaders[i]));
	}

	create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
	                shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);

	ctx.abi.inputs = &ctx.inputs[0];
	ctx.abi.emit_outputs = handle_shader_outputs_post;
	ctx.abi.emit_vertex = visit_emit_vertex;
	ctx.abi.load_ubo = radv_load_ubo;
	ctx.abi.load_ssbo = radv_load_ssbo;
	ctx.abi.load_sampler_desc = radv_get_sampler_desc;
	ctx.abi.load_resource = radv_load_resource;
	ctx.abi.clamp_shadow_reference = false;
	ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9;

	if (shader_count >= 2)
		ac_init_exec_full_mask(&ctx.ac);

	if (ctx.ac.chip_class == GFX9 &&
	    shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
		ac_nir_fixup_ls_hs_input_vgprs(&ctx);

	for(int i = 0; i < shader_count; ++i) {
		ctx.stage = shaders[i]->info.stage;
		ctx.output_mask = 0;
		ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
		ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
			ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
			ctx.abi.load_inputs = load_gs_input;
			ctx.abi.emit_primitive = visit_end_primitive;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
			ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
			ctx.abi.load_tess_varyings = load_tcs_varyings;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.abi.store_tcs_outputs = store_tcs_output;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			if (shader_count == 1)
				ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
			else
				ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
			ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
			ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
			ctx.abi.load_tess_varyings = load_tes_input;
			ctx.abi.load_tess_coord = load_tess_coord;
			ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
			ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
			ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
		} else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
			if (shader_info->info.vs.needs_instance_id) {
				if (ctx.options->key.vs.as_ls) {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
				} else {
					ctx.shader_info->vs.vgpr_comp_cnt =
						MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
				}
			}
			ctx.abi.load_base_vertex = radv_load_base_vertex;
		} else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
			shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
			ctx.abi.lookup_interp_param = lookup_interp_param;
			ctx.abi.load_sample_position = load_sample_position;
			ctx.abi.load_sample_mask_in = load_sample_mask_in;
			ctx.abi.emit_kill = radv_emit_kill;
		}

		if (i)
			ac_emit_barrier(&ctx.ac, ctx.stage);

		nir_foreach_variable(variable, &shaders[i]->outputs)
			scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
				shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
			                                  LLVMConstInt(ctx.ac.i32, 8 * i, false),
			                                  LLVMConstInt(ctx.ac.i32, 8, false), false);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
			                                  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}
, void *context
)
3316 unsigned *retval
= (unsigned *)context
;
3317 LLVMDiagnosticSeverity severity
= LLVMGetDiagInfoSeverity(di
);
3318 char *description
= LLVMGetDiagInfoDescription(di
);
3320 if (severity
== LLVMDSError
) {
3322 fprintf(stderr
, "LLVM triggered Diagnostic Handler: %s\n",
3326 LLVMDisposeMessage(description
);
3329 static unsigned ac_llvm_compile(LLVMModuleRef M
,
3330 struct ac_shader_binary
*binary
,
3331 struct ac_llvm_compiler
*ac_llvm
)
3333 unsigned retval
= 0;
3335 LLVMContextRef llvm_ctx
;
3336 LLVMMemoryBufferRef out_buffer
;
3337 unsigned buffer_size
;
3338 const char *buffer_data
;
3341 /* Setup Diagnostic Handler*/
3342 llvm_ctx
= LLVMGetModuleContext(M
);
3344 LLVMContextSetDiagnosticHandler(llvm_ctx
, ac_diagnostic_handler
,
3348 mem_err
= LLVMTargetMachineEmitToMemoryBuffer(ac_llvm
->tm
, M
, LLVMObjectFile
,
3351 /* Process Errors/Warnings */
3353 fprintf(stderr
, "%s: %s", __FUNCTION__
, err
);
3359 /* Extract Shader Code*/
3360 buffer_size
= LLVMGetBufferSize(out_buffer
);
3361 buffer_data
= LLVMGetBufferStart(out_buffer
);
3363 ac_elf_read(buffer_data
, buffer_size
, binary
);
3366 LLVMDisposeMemoryBuffer(out_buffer
);
3372 static void ac_compile_llvm_module(struct ac_llvm_compiler
*ac_llvm
,
3373 LLVMModuleRef llvm_module
,
3374 struct ac_shader_binary
*binary
,
3375 struct ac_shader_config
*config
,
3376 struct radv_shader_variant_info
*shader_info
,
3377 gl_shader_stage stage
,
3378 const struct radv_nir_compiler_options
*options
)
3380 if (options
->dump_shader
)
3381 ac_dump_module(llvm_module
);
3383 memset(binary
, 0, sizeof(*binary
));
3385 if (options
->record_llvm_ir
) {
3386 char *llvm_ir
= LLVMPrintModuleToString(llvm_module
);
3387 binary
->llvm_ir_string
= strdup(llvm_ir
);
3388 LLVMDisposeMessage(llvm_ir
);
3391 int v
= ac_llvm_compile(llvm_module
, binary
, ac_llvm
);
3393 fprintf(stderr
, "compile failed\n");
3396 if (options
->dump_shader
)
3397 fprintf(stderr
, "disasm:\n%s\n", binary
->disasm_string
);
3399 ac_shader_binary_read_config(binary
, config
, 0, options
->supports_spill
);
3401 LLVMContextRef ctx
= LLVMGetModuleContext(llvm_module
);
3402 LLVMDisposeModule(llvm_module
);
3403 LLVMContextDispose(ctx
);
3405 if (stage
== MESA_SHADER_FRAGMENT
) {
3406 shader_info
->num_input_vgprs
= 0;
3407 if (G_0286CC_PERSP_SAMPLE_ENA(config
->spi_ps_input_addr
))
3408 shader_info
->num_input_vgprs
+= 2;
3409 if (G_0286CC_PERSP_CENTER_ENA(config
->spi_ps_input_addr
))
3410 shader_info
->num_input_vgprs
+= 2;
3411 if (G_0286CC_PERSP_CENTROID_ENA(config
->spi_ps_input_addr
))
3412 shader_info
->num_input_vgprs
+= 2;
3413 if (G_0286CC_PERSP_PULL_MODEL_ENA(config
->spi_ps_input_addr
))
3414 shader_info
->num_input_vgprs
+= 3;
3415 if (G_0286CC_LINEAR_SAMPLE_ENA(config
->spi_ps_input_addr
))
3416 shader_info
->num_input_vgprs
+= 2;
3417 if (G_0286CC_LINEAR_CENTER_ENA(config
->spi_ps_input_addr
))
3418 shader_info
->num_input_vgprs
+= 2;
3419 if (G_0286CC_LINEAR_CENTROID_ENA(config
->spi_ps_input_addr
))
3420 shader_info
->num_input_vgprs
+= 2;
3421 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config
->spi_ps_input_addr
))
3422 shader_info
->num_input_vgprs
+= 1;
3423 if (G_0286CC_POS_X_FLOAT_ENA(config
->spi_ps_input_addr
))
3424 shader_info
->num_input_vgprs
+= 1;
3425 if (G_0286CC_POS_Y_FLOAT_ENA(config
->spi_ps_input_addr
))
3426 shader_info
->num_input_vgprs
+= 1;
3427 if (G_0286CC_POS_Z_FLOAT_ENA(config
->spi_ps_input_addr
))
3428 shader_info
->num_input_vgprs
+= 1;
3429 if (G_0286CC_POS_W_FLOAT_ENA(config
->spi_ps_input_addr
))
3430 shader_info
->num_input_vgprs
+= 1;
3431 if (G_0286CC_FRONT_FACE_ENA(config
->spi_ps_input_addr
))
3432 shader_info
->num_input_vgprs
+= 1;
3433 if (G_0286CC_ANCILLARY_ENA(config
->spi_ps_input_addr
))
3434 shader_info
->num_input_vgprs
+= 1;
3435 if (G_0286CC_SAMPLE_COVERAGE_ENA(config
->spi_ps_input_addr
))
3436 shader_info
->num_input_vgprs
+= 1;
3437 if (G_0286CC_POS_FIXED_PT_ENA(config
->spi_ps_input_addr
))
3438 shader_info
->num_input_vgprs
+= 1;
3440 config
->num_vgprs
= MAX2(config
->num_vgprs
, shader_info
->num_input_vgprs
);
3442 /* +3 for scratch wave offset and VCC */
3443 config
->num_sgprs
= MAX2(config
->num_sgprs
,
3444 shader_info
->num_input_sgprs
+ 3);
3446 /* Enable 64-bit and 16-bit denormals, because there is no performance
3449 * If denormals are enabled, all floating-point output modifiers are
3452 * Don't enable denormals for 32-bit floats, because:
3453 * - Floating-point output modifiers would be ignored by the hw.
3454 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3455 * have to stop using those.
3456 * - SI & CI would be very slow.
3458 config
->float_mode
|= V_00B028_FP_64_DENORMS
;
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}
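/* The GS copy shader runs as a hardware VS: it reads each vertex's outputs
 * back from the GSVS ring and re-exports them through the regular VS
 * output path. */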
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	int idx = 0;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int length = 4;
		int slot = idx;
		int slot_inc = 1;
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* unpack clip and cull from a single set of slots */
			length = ctx->num_output_clips + ctx->num_output_culls;
			if (length > 4)
				slot_inc = 2;
		}

		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef value, soffset;

			soffset = LLVMConstInt(ctx->ac.i32,
					       (slot * 4 + j) *
					       ctx->gs_max_out_vertices * 16 * 4, false);

			value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
						     1, ctx->ac.i32_0,
						     vtx_offset, soffset,
						     0, 1, 1, true, false);

			LLVMBuildStore(ctx->ac.builder,
				       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
		}
		idx += slot_inc;
	}
	handle_vs_outputs_post(ctx, false, false, &ctx->shader_info->vs.outinfo);
}
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
	ctx.num_output_culls = geom_shader->info.cull_distance_array_size;

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}