/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "ac_nir_to_llvm.h"
28 #include "si_shader_internal.h"
30 #include "tgsi/tgsi_from_mesa.h"
31 #include "util/u_memory.h"
/* Context passed through LLVM's diagnostic-handler callback. */
struct si_llvm_diagnostics {
   struct pipe_debug_callback *debug;
   /* Set non-zero by si_diagnostic_handler when LLVM reports an error;
    * checked in si_compile_llvm after compilation (diag.retval). */
   unsigned retval;
};
38 static void si_diagnostic_handler(LLVMDiagnosticInfoRef di
, void *context
)
40 struct si_llvm_diagnostics
*diag
= (struct si_llvm_diagnostics
*)context
;
41 LLVMDiagnosticSeverity severity
= LLVMGetDiagInfoSeverity(di
);
42 const char *severity_str
= NULL
;
46 severity_str
= "error";
49 severity_str
= "warning";
57 char *description
= LLVMGetDiagInfoDescription(di
);
59 pipe_debug_message(diag
->debug
, SHADER_INFO
, "LLVM diagnostic (%s): %s", severity_str
,
62 if (severity
== LLVMDSError
) {
64 fprintf(stderr
, "LLVM triggered Diagnostic Handler: %s\n", description
);
67 LLVMDisposeMessage(description
);
70 bool si_compile_llvm(struct si_screen
*sscreen
, struct si_shader_binary
*binary
,
71 struct ac_shader_config
*conf
, struct ac_llvm_compiler
*compiler
,
72 struct ac_llvm_context
*ac
, struct pipe_debug_callback
*debug
,
73 gl_shader_stage stage
, const char *name
, bool less_optimized
)
75 unsigned count
= p_atomic_inc_return(&sscreen
->num_compilations
);
77 if (si_can_dump_shader(sscreen
, stage
)) {
78 fprintf(stderr
, "radeonsi: Compiling shader %d\n", count
);
80 if (!(sscreen
->debug_flags
& (DBG(NO_IR
) | DBG(PREOPT_IR
)))) {
81 fprintf(stderr
, "%s LLVM IR:\n\n", name
);
82 ac_dump_module(ac
->module
);
83 fprintf(stderr
, "\n");
87 if (sscreen
->record_llvm_ir
) {
88 char *ir
= LLVMPrintModuleToString(ac
->module
);
89 binary
->llvm_ir_string
= strdup(ir
);
90 LLVMDisposeMessage(ir
);
93 if (!si_replace_shader(count
, binary
)) {
94 struct ac_compiler_passes
*passes
= compiler
->passes
;
96 if (ac
->wave_size
== 32)
97 passes
= compiler
->passes_wave32
;
98 else if (less_optimized
&& compiler
->low_opt_passes
)
99 passes
= compiler
->low_opt_passes
;
101 struct si_llvm_diagnostics diag
= {debug
};
102 LLVMContextSetDiagnosticHandler(ac
->context
, si_diagnostic_handler
, &diag
);
104 if (!ac_compile_module_to_elf(passes
, ac
->module
, (char **)&binary
->elf_buffer
,
108 if (diag
.retval
!= 0) {
109 pipe_debug_message(debug
, SHADER_INFO
, "LLVM compilation failed");
114 struct ac_rtld_binary rtld
;
115 if (!ac_rtld_open(&rtld
, (struct ac_rtld_open_info
){
116 .info
= &sscreen
->info
,
117 .shader_type
= stage
,
118 .wave_size
= ac
->wave_size
,
120 .elf_ptrs
= &binary
->elf_buffer
,
121 .elf_sizes
= &binary
->elf_size
}))
124 bool ok
= ac_rtld_read_config(&sscreen
->info
, &rtld
, conf
);
125 ac_rtld_close(&rtld
);
129 void si_llvm_context_init(struct si_shader_context
*ctx
, struct si_screen
*sscreen
,
130 struct ac_llvm_compiler
*compiler
, unsigned wave_size
)
132 memset(ctx
, 0, sizeof(*ctx
));
133 ctx
->screen
= sscreen
;
134 ctx
->compiler
= compiler
;
136 ac_llvm_context_init(&ctx
->ac
, compiler
, sscreen
->info
.chip_class
, sscreen
->info
.family
,
137 AC_FLOAT_MODE_DEFAULT_OPENGL
, wave_size
, 64);
140 void si_llvm_create_func(struct si_shader_context
*ctx
, const char *name
, LLVMTypeRef
*return_types
,
141 unsigned num_return_elems
, unsigned max_workgroup_size
)
143 LLVMTypeRef ret_type
;
144 enum ac_llvm_calling_convention call_conv
;
146 if (num_return_elems
)
147 ret_type
= LLVMStructTypeInContext(ctx
->ac
.context
, return_types
, num_return_elems
, true);
149 ret_type
= ctx
->ac
.voidt
;
151 gl_shader_stage real_stage
= ctx
->stage
;
153 /* LS is merged into HS (TCS), and ES is merged into GS. */
154 if (ctx
->screen
->info
.chip_class
>= GFX9
) {
155 if (ctx
->shader
->key
.as_ls
)
156 real_stage
= MESA_SHADER_TESS_CTRL
;
157 else if (ctx
->shader
->key
.as_es
|| ctx
->shader
->key
.as_ngg
)
158 real_stage
= MESA_SHADER_GEOMETRY
;
161 switch (real_stage
) {
162 case MESA_SHADER_VERTEX
:
163 case MESA_SHADER_TESS_EVAL
:
164 call_conv
= AC_LLVM_AMDGPU_VS
;
166 case MESA_SHADER_TESS_CTRL
:
167 call_conv
= AC_LLVM_AMDGPU_HS
;
169 case MESA_SHADER_GEOMETRY
:
170 call_conv
= AC_LLVM_AMDGPU_GS
;
172 case MESA_SHADER_FRAGMENT
:
173 call_conv
= AC_LLVM_AMDGPU_PS
;
175 case MESA_SHADER_COMPUTE
:
176 call_conv
= AC_LLVM_AMDGPU_CS
;
179 unreachable("Unhandle shader type");
182 /* Setup the function */
183 ctx
->return_type
= ret_type
;
184 ctx
->main_fn
= ac_build_main(&ctx
->args
, &ctx
->ac
, call_conv
, name
, ret_type
, ctx
->ac
.module
);
185 ctx
->return_value
= LLVMGetUndef(ctx
->return_type
);
187 if (ctx
->screen
->info
.address32_hi
) {
188 ac_llvm_add_target_dep_function_attr(ctx
->main_fn
, "amdgpu-32bit-address-high-bits",
189 ctx
->screen
->info
.address32_hi
);
192 ac_llvm_set_workgroup_size(ctx
->main_fn
, max_workgroup_size
);
195 void si_llvm_optimize_module(struct si_shader_context
*ctx
)
197 /* Dump LLVM IR before any optimization passes */
198 if (ctx
->screen
->debug_flags
& DBG(PREOPT_IR
) && si_can_dump_shader(ctx
->screen
, ctx
->stage
))
199 LLVMDumpModule(ctx
->ac
.module
);
202 LLVMRunPassManager(ctx
->compiler
->passmgr
, ctx
->ac
.module
);
203 LLVMDisposeBuilder(ctx
->ac
.builder
);
206 void si_llvm_dispose(struct si_shader_context
*ctx
)
208 LLVMDisposeModule(ctx
->ac
.module
);
209 LLVMContextDispose(ctx
->ac
.context
);
210 ac_llvm_context_dispose(&ctx
->ac
);
214 * Load a dword from a constant buffer.
216 LLVMValueRef
si_buffer_load_const(struct si_shader_context
*ctx
, LLVMValueRef resource
,
219 return ac_build_buffer_load(&ctx
->ac
, resource
, 1, NULL
, offset
, NULL
, 0, 0, true, true);
222 void si_llvm_build_ret(struct si_shader_context
*ctx
, LLVMValueRef ret
)
224 if (LLVMGetTypeKind(LLVMTypeOf(ret
)) == LLVMVoidTypeKind
)
225 LLVMBuildRetVoid(ctx
->ac
.builder
);
227 LLVMBuildRet(ctx
->ac
.builder
, ret
);
230 LLVMValueRef
si_insert_input_ret(struct si_shader_context
*ctx
, LLVMValueRef ret
,
231 struct ac_arg param
, unsigned return_index
)
233 return LLVMBuildInsertValue(ctx
->ac
.builder
, ret
, ac_get_arg(&ctx
->ac
, param
), return_index
, "");
236 LLVMValueRef
si_insert_input_ret_float(struct si_shader_context
*ctx
, LLVMValueRef ret
,
237 struct ac_arg param
, unsigned return_index
)
239 LLVMBuilderRef builder
= ctx
->ac
.builder
;
240 LLVMValueRef p
= ac_get_arg(&ctx
->ac
, param
);
242 return LLVMBuildInsertValue(builder
, ret
, ac_to_float(&ctx
->ac
, p
), return_index
, "");
245 LLVMValueRef
si_insert_input_ptr(struct si_shader_context
*ctx
, LLVMValueRef ret
,
246 struct ac_arg param
, unsigned return_index
)
248 LLVMBuilderRef builder
= ctx
->ac
.builder
;
249 LLVMValueRef ptr
= ac_get_arg(&ctx
->ac
, param
);
250 ptr
= LLVMBuildPtrToInt(builder
, ptr
, ctx
->ac
.i32
, "");
251 return LLVMBuildInsertValue(builder
, ret
, ptr
, return_index
, "");
254 LLVMValueRef
si_prolog_get_rw_buffers(struct si_shader_context
*ctx
)
256 LLVMValueRef ptr
[2], list
;
257 bool merged_shader
= si_is_merged_shader(ctx
->shader
);
259 ptr
[0] = LLVMGetParam(ctx
->main_fn
, (merged_shader
? 8 : 0) + SI_SGPR_RW_BUFFERS
);
261 LLVMBuildIntToPtr(ctx
->ac
.builder
, ptr
[0], ac_array_in_const32_addr_space(ctx
->ac
.v4i32
), "");
265 LLVMValueRef
si_build_gather_64bit(struct si_shader_context
*ctx
, LLVMTypeRef type
,
266 LLVMValueRef val1
, LLVMValueRef val2
)
268 LLVMValueRef values
[2] = {
269 ac_to_integer(&ctx
->ac
, val1
),
270 ac_to_integer(&ctx
->ac
, val2
),
272 LLVMValueRef result
= ac_build_gather_values(&ctx
->ac
, values
, 2);
273 return LLVMBuildBitCast(ctx
->ac
.builder
, result
, type
, "");
276 void si_llvm_emit_barrier(struct si_shader_context
*ctx
)
278 /* GFX6 only (thanks to a hw bug workaround):
279 * The real barrier instruction isn’t needed, because an entire patch
280 * always fits into a single wave.
282 if (ctx
->screen
->info
.chip_class
== GFX6
&& ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
283 ac_build_waitcnt(&ctx
->ac
, AC_WAIT_LGKM
| AC_WAIT_VLOAD
| AC_WAIT_VSTORE
);
287 ac_build_s_barrier(&ctx
->ac
);
290 /* Ensure that the esgs ring is declared.
292 * We declare it with 64KB alignment as a hint that the
293 * pointer value will always be 0.
295 void si_llvm_declare_esgs_ring(struct si_shader_context
*ctx
)
300 assert(!LLVMGetNamedGlobal(ctx
->ac
.module
, "esgs_ring"));
302 ctx
->esgs_ring
= LLVMAddGlobalInAddressSpace(ctx
->ac
.module
, LLVMArrayType(ctx
->ac
.i32
, 0),
303 "esgs_ring", AC_ADDR_SPACE_LDS
);
304 LLVMSetLinkage(ctx
->esgs_ring
, LLVMExternalLinkage
);
305 LLVMSetAlignment(ctx
->esgs_ring
, 64 * 1024);
308 void si_init_exec_from_input(struct si_shader_context
*ctx
, struct ac_arg param
, unsigned bitoffset
)
310 LLVMValueRef args
[] = {
311 ac_get_arg(&ctx
->ac
, param
),
312 LLVMConstInt(ctx
->ac
.i32
, bitoffset
, 0),
314 ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.init.exec.from.input", ctx
->ac
.voidt
, args
, 2,
315 AC_FUNC_ATTR_CONVERGENT
);
319 * Get the value of a shader input parameter and extract a bitfield.
321 static LLVMValueRef
unpack_llvm_param(struct si_shader_context
*ctx
, LLVMValueRef value
,
322 unsigned rshift
, unsigned bitwidth
)
324 if (LLVMGetTypeKind(LLVMTypeOf(value
)) == LLVMFloatTypeKind
)
325 value
= ac_to_integer(&ctx
->ac
, value
);
328 value
= LLVMBuildLShr(ctx
->ac
.builder
, value
, LLVMConstInt(ctx
->ac
.i32
, rshift
, 0), "");
330 if (rshift
+ bitwidth
< 32) {
331 unsigned mask
= (1 << bitwidth
) - 1;
332 value
= LLVMBuildAnd(ctx
->ac
.builder
, value
, LLVMConstInt(ctx
->ac
.i32
, mask
, 0), "");
338 LLVMValueRef
si_unpack_param(struct si_shader_context
*ctx
, struct ac_arg param
, unsigned rshift
,
341 LLVMValueRef value
= ac_get_arg(&ctx
->ac
, param
);
343 return unpack_llvm_param(ctx
, value
, rshift
, bitwidth
);
346 LLVMValueRef
si_get_primitive_id(struct si_shader_context
*ctx
, unsigned swizzle
)
349 return ctx
->ac
.i32_0
;
351 switch (ctx
->stage
) {
352 case MESA_SHADER_VERTEX
:
353 return ac_get_arg(&ctx
->ac
, ctx
->vs_prim_id
);
354 case MESA_SHADER_TESS_CTRL
:
355 return ac_get_arg(&ctx
->ac
, ctx
->args
.tcs_patch_id
);
356 case MESA_SHADER_TESS_EVAL
:
357 return ac_get_arg(&ctx
->ac
, ctx
->args
.tes_patch_id
);
358 case MESA_SHADER_GEOMETRY
:
359 return ac_get_arg(&ctx
->ac
, ctx
->args
.gs_prim_id
);
362 return ctx
->ac
.i32_0
;
366 LLVMValueRef
si_llvm_get_block_size(struct ac_shader_abi
*abi
)
368 struct si_shader_context
*ctx
= si_shader_context_from_abi(abi
);
370 LLVMValueRef values
[3];
373 unsigned *properties
= ctx
->shader
->selector
->info
.properties
;
375 if (properties
[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH
] != 0) {
376 unsigned sizes
[3] = {properties
[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH
],
377 properties
[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT
],
378 properties
[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH
]};
380 for (i
= 0; i
< 3; ++i
)
381 values
[i
] = LLVMConstInt(ctx
->ac
.i32
, sizes
[i
], 0);
383 result
= ac_build_gather_values(&ctx
->ac
, values
, 3);
385 result
= ac_get_arg(&ctx
->ac
, ctx
->block_size
);
391 void si_llvm_declare_compute_memory(struct si_shader_context
*ctx
)
393 struct si_shader_selector
*sel
= ctx
->shader
->selector
;
394 unsigned lds_size
= sel
->info
.properties
[TGSI_PROPERTY_CS_LOCAL_SIZE
];
396 LLVMTypeRef i8p
= LLVMPointerType(ctx
->ac
.i8
, AC_ADDR_SPACE_LDS
);
399 assert(!ctx
->ac
.lds
);
401 var
= LLVMAddGlobalInAddressSpace(ctx
->ac
.module
, LLVMArrayType(ctx
->ac
.i8
, lds_size
),
402 "compute_lds", AC_ADDR_SPACE_LDS
);
403 LLVMSetAlignment(var
, 64 * 1024);
405 ctx
->ac
.lds
= LLVMBuildBitCast(ctx
->ac
.builder
, var
, i8p
, "");
408 bool si_nir_build_llvm(struct si_shader_context
*ctx
, struct nir_shader
*nir
)
410 if (nir
->info
.stage
== MESA_SHADER_VERTEX
) {
411 si_llvm_load_vs_inputs(ctx
, nir
);
412 } else if (nir
->info
.stage
== MESA_SHADER_FRAGMENT
) {
413 unsigned colors_read
= ctx
->shader
->selector
->info
.colors_read
;
414 LLVMValueRef main_fn
= ctx
->main_fn
;
416 LLVMValueRef undef
= LLVMGetUndef(ctx
->ac
.f32
);
418 unsigned offset
= SI_PARAM_POS_FIXED_PT
+ 1;
420 if (colors_read
& 0x0f) {
421 unsigned mask
= colors_read
& 0x0f;
422 LLVMValueRef values
[4];
423 values
[0] = mask
& 0x1 ? LLVMGetParam(main_fn
, offset
++) : undef
;
424 values
[1] = mask
& 0x2 ? LLVMGetParam(main_fn
, offset
++) : undef
;
425 values
[2] = mask
& 0x4 ? LLVMGetParam(main_fn
, offset
++) : undef
;
426 values
[3] = mask
& 0x8 ? LLVMGetParam(main_fn
, offset
++) : undef
;
427 ctx
->abi
.color0
= ac_to_integer(&ctx
->ac
, ac_build_gather_values(&ctx
->ac
, values
, 4));
429 if (colors_read
& 0xf0) {
430 unsigned mask
= (colors_read
& 0xf0) >> 4;
431 LLVMValueRef values
[4];
432 values
[0] = mask
& 0x1 ? LLVMGetParam(main_fn
, offset
++) : undef
;
433 values
[1] = mask
& 0x2 ? LLVMGetParam(main_fn
, offset
++) : undef
;
434 values
[2] = mask
& 0x4 ? LLVMGetParam(main_fn
, offset
++) : undef
;
435 values
[3] = mask
& 0x8 ? LLVMGetParam(main_fn
, offset
++) : undef
;
436 ctx
->abi
.color1
= ac_to_integer(&ctx
->ac
, ac_build_gather_values(&ctx
->ac
, values
, 4));
439 ctx
->abi
.interp_at_sample_force_center
=
440 ctx
->shader
->key
.mono
.u
.ps
.interpolate_at_sample_force_center
;
442 ctx
->abi
.kill_ps_if_inf_interp
=
443 (ctx
->screen
->debug_flags
& DBG(KILL_PS_INF_INTERP
)) &&
444 (ctx
->shader
->selector
->info
.uses_persp_center
||
445 ctx
->shader
->selector
->info
.uses_persp_centroid
||
446 ctx
->shader
->selector
->info
.uses_persp_sample
);
448 } else if (nir
->info
.stage
== MESA_SHADER_COMPUTE
) {
449 if (nir
->info
.cs
.user_data_components_amd
) {
450 ctx
->abi
.user_data
= ac_get_arg(&ctx
->ac
, ctx
->cs_user_data
);
451 ctx
->abi
.user_data
= ac_build_expand_to_vec4(&ctx
->ac
, ctx
->abi
.user_data
,
452 nir
->info
.cs
.user_data_components_amd
);
456 ctx
->abi
.inputs
= &ctx
->inputs
[0];
457 ctx
->abi
.clamp_shadow_reference
= true;
458 ctx
->abi
.robust_buffer_access
= true;
459 ctx
->abi
.convert_undef_to_zero
= true;
460 ctx
->abi
.clamp_div_by_zero
= ctx
->screen
->options
.clamp_div_by_zero
;
462 if (ctx
->shader
->selector
->info
.properties
[TGSI_PROPERTY_CS_LOCAL_SIZE
]) {
463 assert(gl_shader_stage_is_compute(nir
->info
.stage
));
464 si_llvm_declare_compute_memory(ctx
);
467 const struct si_shader_info
*info
= &ctx
->shader
->selector
->info
;
468 for (unsigned i
= 0; i
< info
->num_outputs
; i
++) {
469 for (unsigned j
= 0; j
< 4; j
++)
470 ctx
->abi
.outputs
[i
* 4 + j
] = ac_build_alloca_undef(&ctx
->ac
, ctx
->ac
.f32
, "");
473 ac_nir_translate(&ctx
->ac
, &ctx
->abi
, &ctx
->args
, nir
);
479 * Given a list of shader part functions, build a wrapper function that
480 * runs them in sequence to form a monolithic shader.
482 void si_build_wrapper_function(struct si_shader_context
*ctx
, LLVMValueRef
*parts
,
483 unsigned num_parts
, unsigned main_part
,
484 unsigned next_shader_first_part
)
486 LLVMBuilderRef builder
= ctx
->ac
.builder
;
487 /* PS epilog has one arg per color component; gfx9 merged shader
488 * prologs need to forward 40 SGPRs.
490 LLVMValueRef initial
[AC_MAX_ARGS
], out
[AC_MAX_ARGS
];
491 LLVMTypeRef function_type
;
492 unsigned num_first_params
;
493 unsigned num_out
, initial_num_out
;
494 ASSERTED
unsigned num_out_sgpr
; /* used in debug checks */
495 ASSERTED
unsigned initial_num_out_sgpr
; /* used in debug checks */
496 unsigned num_sgprs
, num_vgprs
;
499 memset(&ctx
->args
, 0, sizeof(ctx
->args
));
501 for (unsigned i
= 0; i
< num_parts
; ++i
) {
502 ac_add_function_attr(ctx
->ac
.context
, parts
[i
], -1, AC_FUNC_ATTR_ALWAYSINLINE
);
503 LLVMSetLinkage(parts
[i
], LLVMPrivateLinkage
);
506 /* The parameters of the wrapper function correspond to those of the
507 * first part in terms of SGPRs and VGPRs, but we use the types of the
508 * main part to get the right types. This is relevant for the
509 * dereferenceable attribute on descriptor table pointers.
514 function_type
= LLVMGetElementType(LLVMTypeOf(parts
[0]));
515 num_first_params
= LLVMCountParamTypes(function_type
);
517 for (unsigned i
= 0; i
< num_first_params
; ++i
) {
518 LLVMValueRef param
= LLVMGetParam(parts
[0], i
);
520 if (ac_is_sgpr_param(param
)) {
521 assert(num_vgprs
== 0);
522 num_sgprs
+= ac_get_type_size(LLVMTypeOf(param
)) / 4;
524 num_vgprs
+= ac_get_type_size(LLVMTypeOf(param
)) / 4;
529 while (gprs
< num_sgprs
+ num_vgprs
) {
530 LLVMValueRef param
= LLVMGetParam(parts
[main_part
], ctx
->args
.arg_count
);
531 LLVMTypeRef type
= LLVMTypeOf(param
);
532 unsigned size
= ac_get_type_size(type
) / 4;
534 /* This is going to get casted anyways, so we don't have to
535 * have the exact same type. But we do have to preserve the
536 * pointer-ness so that LLVM knows about it.
538 enum ac_arg_type arg_type
= AC_ARG_INT
;
539 if (LLVMGetTypeKind(type
) == LLVMPointerTypeKind
) {
540 type
= LLVMGetElementType(type
);
542 if (LLVMGetTypeKind(type
) == LLVMVectorTypeKind
) {
543 if (LLVMGetVectorSize(type
) == 4)
544 arg_type
= AC_ARG_CONST_DESC_PTR
;
545 else if (LLVMGetVectorSize(type
) == 8)
546 arg_type
= AC_ARG_CONST_IMAGE_PTR
;
549 } else if (type
== ctx
->ac
.f32
) {
550 arg_type
= AC_ARG_CONST_FLOAT_PTR
;
556 ac_add_arg(&ctx
->args
, gprs
< num_sgprs
? AC_ARG_SGPR
: AC_ARG_VGPR
, size
, arg_type
, NULL
);
558 assert(ac_is_sgpr_param(param
) == (gprs
< num_sgprs
));
559 assert(gprs
+ size
<= num_sgprs
+ num_vgprs
&&
560 (gprs
>= num_sgprs
|| gprs
+ size
<= num_sgprs
));
565 /* Prepare the return type. */
566 unsigned num_returns
= 0;
567 LLVMTypeRef returns
[AC_MAX_ARGS
], last_func_type
, return_type
;
569 last_func_type
= LLVMGetElementType(LLVMTypeOf(parts
[num_parts
- 1]));
570 return_type
= LLVMGetReturnType(last_func_type
);
572 switch (LLVMGetTypeKind(return_type
)) {
573 case LLVMStructTypeKind
:
574 num_returns
= LLVMCountStructElementTypes(return_type
);
575 assert(num_returns
<= ARRAY_SIZE(returns
));
576 LLVMGetStructElementTypes(return_type
, returns
);
578 case LLVMVoidTypeKind
:
581 unreachable("unexpected type");
584 si_llvm_create_func(ctx
, "wrapper", returns
, num_returns
,
585 si_get_max_workgroup_size(ctx
->shader
));
587 if (si_is_merged_shader(ctx
->shader
))
588 ac_init_exec_full_mask(&ctx
->ac
);
590 /* Record the arguments of the function as if they were an output of
596 for (unsigned i
= 0; i
< ctx
->args
.arg_count
; ++i
) {
597 LLVMValueRef param
= LLVMGetParam(ctx
->main_fn
, i
);
598 LLVMTypeRef param_type
= LLVMTypeOf(param
);
599 LLVMTypeRef out_type
= ctx
->args
.args
[i
].file
== AC_ARG_SGPR
? ctx
->ac
.i32
: ctx
->ac
.f32
;
600 unsigned size
= ac_get_type_size(param_type
) / 4;
603 if (LLVMGetTypeKind(param_type
) == LLVMPointerTypeKind
) {
604 param
= LLVMBuildPtrToInt(builder
, param
, ctx
->ac
.i32
, "");
605 param_type
= ctx
->ac
.i32
;
608 if (param_type
!= out_type
)
609 param
= LLVMBuildBitCast(builder
, param
, out_type
, "");
610 out
[num_out
++] = param
;
612 LLVMTypeRef vector_type
= LLVMVectorType(out_type
, size
);
614 if (LLVMGetTypeKind(param_type
) == LLVMPointerTypeKind
) {
615 param
= LLVMBuildPtrToInt(builder
, param
, ctx
->ac
.i64
, "");
616 param_type
= ctx
->ac
.i64
;
619 if (param_type
!= vector_type
)
620 param
= LLVMBuildBitCast(builder
, param
, vector_type
, "");
622 for (unsigned j
= 0; j
< size
; ++j
)
624 LLVMBuildExtractElement(builder
, param
, LLVMConstInt(ctx
->ac
.i32
, j
, 0), "");
627 if (ctx
->args
.args
[i
].file
== AC_ARG_SGPR
)
628 num_out_sgpr
= num_out
;
631 memcpy(initial
, out
, sizeof(out
));
632 initial_num_out
= num_out
;
633 initial_num_out_sgpr
= num_out_sgpr
;
635 /* Now chain the parts. */
636 LLVMValueRef ret
= NULL
;
637 for (unsigned part
= 0; part
< num_parts
; ++part
) {
638 LLVMValueRef in
[AC_MAX_ARGS
];
639 LLVMTypeRef ret_type
;
640 unsigned out_idx
= 0;
641 unsigned num_params
= LLVMCountParams(parts
[part
]);
643 /* Merged shaders are executed conditionally depending
644 * on the number of enabled threads passed in the input SGPRs. */
645 if (si_is_multi_part_shader(ctx
->shader
) && part
== 0) {
646 LLVMValueRef ena
, count
= initial
[3];
648 count
= LLVMBuildAnd(builder
, count
, LLVMConstInt(ctx
->ac
.i32
, 0x7f, 0), "");
649 ena
= LLVMBuildICmp(builder
, LLVMIntULT
, ac_get_thread_id(&ctx
->ac
), count
, "");
650 ac_build_ifcc(&ctx
->ac
, ena
, 6506);
653 /* Derive arguments for the next part from outputs of the
656 for (unsigned param_idx
= 0; param_idx
< num_params
; ++param_idx
) {
658 LLVMTypeRef param_type
;
661 LLVMValueRef arg
= NULL
;
663 param
= LLVMGetParam(parts
[part
], param_idx
);
664 param_type
= LLVMTypeOf(param
);
665 param_size
= ac_get_type_size(param_type
) / 4;
666 is_sgpr
= ac_is_sgpr_param(param
);
669 ac_add_function_attr(ctx
->ac
.context
, parts
[part
], param_idx
+ 1, AC_FUNC_ATTR_INREG
);
670 } else if (out_idx
< num_out_sgpr
) {
671 /* Skip returned SGPRs the current part doesn't
672 * declare on the input. */
673 out_idx
= num_out_sgpr
;
676 assert(out_idx
+ param_size
<= (is_sgpr
? num_out_sgpr
: num_out
));
681 arg
= ac_build_gather_values(&ctx
->ac
, &out
[out_idx
], param_size
);
683 if (LLVMTypeOf(arg
) != param_type
) {
684 if (LLVMGetTypeKind(param_type
) == LLVMPointerTypeKind
) {
685 if (LLVMGetPointerAddressSpace(param_type
) == AC_ADDR_SPACE_CONST_32BIT
) {
686 arg
= LLVMBuildBitCast(builder
, arg
, ctx
->ac
.i32
, "");
687 arg
= LLVMBuildIntToPtr(builder
, arg
, param_type
, "");
689 arg
= LLVMBuildBitCast(builder
, arg
, ctx
->ac
.i64
, "");
690 arg
= LLVMBuildIntToPtr(builder
, arg
, param_type
, "");
693 arg
= LLVMBuildBitCast(builder
, arg
, param_type
, "");
698 out_idx
+= param_size
;
701 ret
= ac_build_call(&ctx
->ac
, parts
[part
], in
, num_params
);
703 if (si_is_multi_part_shader(ctx
->shader
) && part
+ 1 == next_shader_first_part
) {
704 ac_build_endif(&ctx
->ac
, 6506);
706 /* The second half of the merged shader should use
707 * the inputs from the toplevel (wrapper) function,
708 * not the return value from the last call.
710 * That's because the last call was executed condi-
711 * tionally, so we can't consume it in the main
714 memcpy(out
, initial
, sizeof(initial
));
715 num_out
= initial_num_out
;
716 num_out_sgpr
= initial_num_out_sgpr
;
720 /* Extract the returned GPRs. */
721 ret_type
= LLVMTypeOf(ret
);
725 if (LLVMGetTypeKind(ret_type
) != LLVMVoidTypeKind
) {
726 assert(LLVMGetTypeKind(ret_type
) == LLVMStructTypeKind
);
728 unsigned ret_size
= LLVMCountStructElementTypes(ret_type
);
730 for (unsigned i
= 0; i
< ret_size
; ++i
) {
731 LLVMValueRef val
= LLVMBuildExtractValue(builder
, ret
, i
, "");
733 assert(num_out
< ARRAY_SIZE(out
));
734 out
[num_out
++] = val
;
736 if (LLVMTypeOf(val
) == ctx
->ac
.i32
) {
737 assert(num_out_sgpr
+ 1 == num_out
);
738 num_out_sgpr
= num_out
;
744 /* Return the value from the last part. */
745 if (LLVMGetTypeKind(LLVMTypeOf(ret
)) == LLVMVoidTypeKind
)
746 LLVMBuildRetVoid(builder
);
748 LLVMBuildRet(builder
, ret
);