2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 #include "si_shader_internal.h"
26 #include "radeon/radeon_elf_util.h"
28 #include "gallivm/lp_bld_const.h"
29 #include "gallivm/lp_bld_gather.h"
30 #include "gallivm/lp_bld_flow.h"
31 #include "gallivm/lp_bld_init.h"
32 #include "gallivm/lp_bld_intr.h"
33 #include "gallivm/lp_bld_misc.h"
34 #include "gallivm/lp_bld_swizzle.h"
35 #include "tgsi/tgsi_info.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "util/u_debug.h"
42 #include <llvm-c/Transforms/IPO.h>
43 #include <llvm-c/Transforms/Scalar.h>
45 /* Data for if/else/endif and bgnloop/endloop control flow structures.
48 /* Loop exit or next part of if/else/endif. */
49 LLVMBasicBlockRef next_block
;
50 LLVMBasicBlockRef loop_entry_block
;
53 #define CPU_STRING_LEN 30
54 #define FS_STRING_LEN 30
55 #define TRIPLE_STRING_LEN 7
/*
 * Shader types for the LLVM backend.
 */
enum si_llvm_shader_type {
	RADEON_LLVM_SHADER_PS = 0,
	RADEON_LLVM_SHADER_VS = 1,
	RADEON_LLVM_SHADER_GS = 2,
	RADEON_LLVM_SHADER_CS = 3,
};
/* AMDGPU calling conventions understood by LLVM >= 3.9 (see CallingConv.h). */
enum si_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
};
74 void si_llvm_add_attribute(LLVMValueRef F
, const char *name
, int value
)
78 snprintf(str
, sizeof(str
), "%i", value
);
79 LLVMAddTargetDependentFunctionAttr(F
, name
, str
);
83 * Set the shader type we want to compile
85 * @param type shader type to set
87 void si_llvm_shader_type(LLVMValueRef F
, unsigned type
)
89 enum si_llvm_shader_type llvm_type
;
90 enum si_llvm_calling_convention calling_conv
;
93 case PIPE_SHADER_VERTEX
:
94 case PIPE_SHADER_TESS_CTRL
:
95 case PIPE_SHADER_TESS_EVAL
:
96 llvm_type
= RADEON_LLVM_SHADER_VS
;
97 calling_conv
= RADEON_LLVM_AMDGPU_VS
;
99 case PIPE_SHADER_GEOMETRY
:
100 llvm_type
= RADEON_LLVM_SHADER_GS
;
101 calling_conv
= RADEON_LLVM_AMDGPU_GS
;
103 case PIPE_SHADER_FRAGMENT
:
104 llvm_type
= RADEON_LLVM_SHADER_PS
;
105 calling_conv
= RADEON_LLVM_AMDGPU_PS
;
107 case PIPE_SHADER_COMPUTE
:
108 llvm_type
= RADEON_LLVM_SHADER_CS
;
109 calling_conv
= RADEON_LLVM_AMDGPU_CS
;
112 unreachable("Unhandle shader type");
115 if (HAVE_LLVM
>= 0x309)
116 LLVMSetFunctionCallConv(F
, calling_conv
);
118 si_llvm_add_attribute(F
, "ShaderType", llvm_type
);
/* One-time LLVM target registration; invoked through call_once below, so the
 * signature must match void (*)(void).
 */
static void init_amdgpu_target(void)
{
	gallivm_init_llvm_targets();
#if HAVE_LLVM < 0x0307
	LLVMInitializeR600TargetInfo();
	LLVMInitializeR600Target();
	LLVMInitializeR600TargetMC();
	LLVMInitializeR600AsmPrinter();
#else
	LLVMInitializeAMDGPUTargetInfo();
	LLVMInitializeAMDGPUTarget();
	LLVMInitializeAMDGPUTargetMC();
	LLVMInitializeAMDGPUAsmPrinter();
#endif
}
138 static once_flag init_amdgpu_target_once_flag
= ONCE_FLAG_INIT
;
140 LLVMTargetRef
si_llvm_get_amdgpu_target(const char *triple
)
142 LLVMTargetRef target
= NULL
;
143 char *err_message
= NULL
;
145 call_once(&init_amdgpu_target_once_flag
, init_amdgpu_target
);
147 if (LLVMGetTargetFromTriple(triple
, &target
, &err_message
)) {
148 fprintf(stderr
, "Cannot find target for triple %s ", triple
);
150 fprintf(stderr
, "%s\n", err_message
);
152 LLVMDisposeMessage(err_message
);
/* Context passed to the LLVM diagnostic handler: the debug callback to
 * forward messages to, and the eventual compile return value (0 = OK).
 */
struct si_llvm_diagnostics {
	struct pipe_debug_callback *debug;
	unsigned retval;
};
163 static void si_diagnostic_handler(LLVMDiagnosticInfoRef di
, void *context
)
165 struct si_llvm_diagnostics
*diag
= (struct si_llvm_diagnostics
*)context
;
166 LLVMDiagnosticSeverity severity
= LLVMGetDiagInfoSeverity(di
);
167 char *description
= LLVMGetDiagInfoDescription(di
);
168 const char *severity_str
= NULL
;
172 severity_str
= "error";
175 severity_str
= "warning";
178 severity_str
= "remark";
181 severity_str
= "note";
184 severity_str
= "unknown";
187 pipe_debug_message(diag
->debug
, SHADER_INFO
,
188 "LLVM diagnostic (%s): %s", severity_str
, description
);
190 if (severity
== LLVMDSError
) {
192 fprintf(stderr
,"LLVM triggered Diagnostic Handler: %s\n", description
);
195 LLVMDisposeMessage(description
);
199 * Compile an LLVM module to machine code.
201 * @returns 0 for success, 1 for failure
203 unsigned si_llvm_compile(LLVMModuleRef M
, struct radeon_shader_binary
*binary
,
204 LLVMTargetMachineRef tm
,
205 struct pipe_debug_callback
*debug
)
207 struct si_llvm_diagnostics diag
;
209 LLVMContextRef llvm_ctx
;
210 LLVMMemoryBufferRef out_buffer
;
211 unsigned buffer_size
;
212 const char *buffer_data
;
218 /* Setup Diagnostic Handler*/
219 llvm_ctx
= LLVMGetModuleContext(M
);
221 LLVMContextSetDiagnosticHandler(llvm_ctx
, si_diagnostic_handler
, &diag
);
224 mem_err
= LLVMTargetMachineEmitToMemoryBuffer(tm
, M
, LLVMObjectFile
, &err
,
227 /* Process Errors/Warnings */
229 fprintf(stderr
, "%s: %s", __FUNCTION__
, err
);
230 pipe_debug_message(debug
, SHADER_INFO
,
231 "LLVM emit error: %s", err
);
237 /* Extract Shader Code*/
238 buffer_size
= LLVMGetBufferSize(out_buffer
);
239 buffer_data
= LLVMGetBufferStart(out_buffer
);
241 radeon_elf_read(buffer_data
, buffer_size
, binary
);
244 LLVMDisposeMemoryBuffer(out_buffer
);
247 if (diag
.retval
!= 0)
248 pipe_debug_message(debug
, SHADER_INFO
, "LLVM compile failed");
252 LLVMTypeRef
tgsi2llvmtype(struct lp_build_tgsi_context
*bld_base
,
253 enum tgsi_opcode_type type
)
255 LLVMContextRef ctx
= bld_base
->base
.gallivm
->context
;
258 case TGSI_TYPE_UNSIGNED
:
259 case TGSI_TYPE_SIGNED
:
260 return LLVMInt32TypeInContext(ctx
);
261 case TGSI_TYPE_UNSIGNED64
:
262 case TGSI_TYPE_SIGNED64
:
263 return LLVMInt64TypeInContext(ctx
);
264 case TGSI_TYPE_DOUBLE
:
265 return LLVMDoubleTypeInContext(ctx
);
266 case TGSI_TYPE_UNTYPED
:
267 case TGSI_TYPE_FLOAT
:
268 return LLVMFloatTypeInContext(ctx
);
274 LLVMValueRef
bitcast(struct lp_build_tgsi_context
*bld_base
,
275 enum tgsi_opcode_type type
, LLVMValueRef value
)
277 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
278 LLVMTypeRef dst_type
= tgsi2llvmtype(bld_base
, type
);
281 return LLVMBuildBitCast(builder
, value
, dst_type
, "");
287 * Return a value that is equal to the given i32 \p index if it lies in [0,num)
288 * or an undefined value in the same interval otherwise.
290 LLVMValueRef
si_llvm_bound_index(struct si_shader_context
*ctx
,
294 struct gallivm_state
*gallivm
= &ctx
->gallivm
;
295 LLVMBuilderRef builder
= gallivm
->builder
;
296 LLVMValueRef c_max
= lp_build_const_int32(gallivm
, num
- 1);
299 if (util_is_power_of_two(num
)) {
300 index
= LLVMBuildAnd(builder
, index
, c_max
, "");
302 /* In theory, this MAX pattern should result in code that is
303 * as good as the bit-wise AND above.
305 * In practice, LLVM generates worse code (at the time of
306 * writing), because its value tracking is not strong enough.
308 cc
= LLVMBuildICmp(builder
, LLVMIntULE
, index
, c_max
, "");
309 index
= LLVMBuildSelect(builder
, cc
, index
, c_max
, "");
315 static struct si_llvm_flow
*
316 get_current_flow(struct si_shader_context
*ctx
)
318 if (ctx
->flow_depth
> 0)
319 return &ctx
->flow
[ctx
->flow_depth
- 1];
323 static struct si_llvm_flow
*
324 get_innermost_loop(struct si_shader_context
*ctx
)
326 for (unsigned i
= ctx
->flow_depth
; i
> 0; --i
) {
327 if (ctx
->flow
[i
- 1].loop_entry_block
)
328 return &ctx
->flow
[i
- 1];
333 static struct si_llvm_flow
*
334 push_flow(struct si_shader_context
*ctx
)
336 struct si_llvm_flow
*flow
;
338 if (ctx
->flow_depth
>= ctx
->flow_depth_max
) {
339 unsigned new_max
= MAX2(ctx
->flow_depth
<< 1, RADEON_LLVM_INITIAL_CF_DEPTH
);
340 ctx
->flow
= REALLOC(ctx
->flow
,
341 ctx
->flow_depth_max
* sizeof(*ctx
->flow
),
342 new_max
* sizeof(*ctx
->flow
));
343 ctx
->flow_depth_max
= new_max
;
346 flow
= &ctx
->flow
[ctx
->flow_depth
];
349 flow
->next_block
= NULL
;
350 flow
->loop_entry_block
= NULL
;
354 static LLVMValueRef
emit_swizzle(struct lp_build_tgsi_context
*bld_base
,
361 LLVMValueRef swizzles
[4];
363 LLVMInt32TypeInContext(bld_base
->base
.gallivm
->context
);
365 swizzles
[0] = LLVMConstInt(i32t
, swizzle_x
, 0);
366 swizzles
[1] = LLVMConstInt(i32t
, swizzle_y
, 0);
367 swizzles
[2] = LLVMConstInt(i32t
, swizzle_z
, 0);
368 swizzles
[3] = LLVMConstInt(i32t
, swizzle_w
, 0);
370 return LLVMBuildShuffleVector(bld_base
->base
.gallivm
->builder
,
372 LLVMGetUndef(LLVMTypeOf(value
)),
373 LLVMConstVector(swizzles
, 4), "");
377 * Return the description of the array covering the given temporary register
381 get_temp_array_id(struct lp_build_tgsi_context
*bld_base
,
383 const struct tgsi_ind_register
*reg
)
385 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
386 unsigned num_arrays
= ctx
->soa
.bld_base
.info
->array_max
[TGSI_FILE_TEMPORARY
];
389 if (reg
&& reg
->ArrayID
> 0 && reg
->ArrayID
<= num_arrays
)
392 for (i
= 0; i
< num_arrays
; i
++) {
393 const struct tgsi_array_info
*array
= &ctx
->temp_arrays
[i
];
395 if (reg_index
>= array
->range
.First
&& reg_index
<= array
->range
.Last
)
402 static struct tgsi_declaration_range
403 get_array_range(struct lp_build_tgsi_context
*bld_base
,
404 unsigned File
, unsigned reg_index
,
405 const struct tgsi_ind_register
*reg
)
407 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
408 struct tgsi_declaration_range range
;
410 if (File
== TGSI_FILE_TEMPORARY
) {
411 unsigned array_id
= get_temp_array_id(bld_base
, reg_index
, reg
);
413 return ctx
->temp_arrays
[array_id
- 1].range
;
417 range
.Last
= bld_base
->info
->file_max
[File
];
422 emit_array_index(struct lp_build_tgsi_soa_context
*bld
,
423 const struct tgsi_ind_register
*reg
,
426 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
429 return lp_build_const_int32(gallivm
, offset
);
431 LLVMValueRef addr
= LLVMBuildLoad(gallivm
->builder
, bld
->addr
[reg
->Index
][reg
->Swizzle
], "");
432 return LLVMBuildAdd(gallivm
->builder
, addr
, lp_build_const_int32(gallivm
, offset
), "");
436 * For indirect registers, construct a pointer directly to the requested
437 * element using getelementptr if possible.
439 * Returns NULL if the insertelement/extractelement fallback for array access
443 get_pointer_into_array(struct si_shader_context
*ctx
,
447 const struct tgsi_ind_register
*reg_indirect
)
450 struct tgsi_array_info
*array
;
451 struct gallivm_state
*gallivm
= ctx
->soa
.bld_base
.base
.gallivm
;
452 LLVMBuilderRef builder
= gallivm
->builder
;
453 LLVMValueRef idxs
[2];
457 if (file
!= TGSI_FILE_TEMPORARY
)
460 array_id
= get_temp_array_id(&ctx
->soa
.bld_base
, reg_index
, reg_indirect
);
464 alloca
= ctx
->temp_array_allocas
[array_id
- 1];
468 array
= &ctx
->temp_arrays
[array_id
- 1];
470 if (!(array
->writemask
& (1 << swizzle
)))
471 return ctx
->undef_alloca
;
473 index
= emit_array_index(&ctx
->soa
, reg_indirect
,
474 reg_index
- ctx
->temp_arrays
[array_id
- 1].range
.First
);
476 /* Ensure that the index is within a valid range, to guard against
477 * VM faults and overwriting critical data (e.g. spilled resource
480 * TODO It should be possible to avoid the additional instructions
481 * if LLVM is changed so that it guarantuees:
482 * 1. the scratch space descriptor isolates the current wave (this
483 * could even save the scratch offset SGPR at the cost of an
484 * additional SALU instruction)
485 * 2. the memory for allocas must be allocated at the _end_ of the
486 * scratch space (after spilled registers)
488 index
= si_llvm_bound_index(ctx
, index
, array
->range
.Last
- array
->range
.First
+ 1);
490 index
= LLVMBuildMul(
492 lp_build_const_int32(gallivm
, util_bitcount(array
->writemask
)),
494 index
= LLVMBuildAdd(
496 lp_build_const_int32(
498 util_bitcount(array
->writemask
& ((1 << swizzle
) - 1))),
500 idxs
[0] = ctx
->soa
.bld_base
.uint_bld
.zero
;
502 return LLVMBuildGEP(builder
, alloca
, idxs
, 2, "");
506 si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context
*bld_base
,
507 enum tgsi_opcode_type type
,
511 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
514 result
= LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base
->base
.gallivm
->context
, 32), bld_base
->base
.type
.length
* 2));
516 result
= LLVMBuildInsertElement(builder
,
518 bitcast(bld_base
, TGSI_TYPE_UNSIGNED
, ptr
),
519 bld_base
->int_bld
.zero
, "");
520 result
= LLVMBuildInsertElement(builder
,
522 bitcast(bld_base
, TGSI_TYPE_UNSIGNED
, ptr2
),
523 bld_base
->int_bld
.one
, "");
524 return bitcast(bld_base
, type
, result
);
528 emit_array_fetch(struct lp_build_tgsi_context
*bld_base
,
529 unsigned File
, enum tgsi_opcode_type type
,
530 struct tgsi_declaration_range range
,
533 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
534 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
535 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
537 unsigned i
, size
= range
.Last
- range
.First
+ 1;
538 LLVMTypeRef vec
= LLVMVectorType(tgsi2llvmtype(bld_base
, type
), size
);
539 LLVMValueRef result
= LLVMGetUndef(vec
);
541 struct tgsi_full_src_register tmp_reg
= {};
542 tmp_reg
.Register
.File
= File
;
544 for (i
= 0; i
< size
; ++i
) {
545 tmp_reg
.Register
.Index
= i
+ range
.First
;
546 LLVMValueRef temp
= si_llvm_emit_fetch(bld_base
, &tmp_reg
, type
, swizzle
);
547 result
= LLVMBuildInsertElement(builder
, result
, temp
,
548 lp_build_const_int32(gallivm
, i
), "array_vector");
554 load_value_from_array(struct lp_build_tgsi_context
*bld_base
,
556 enum tgsi_opcode_type type
,
559 const struct tgsi_ind_register
*reg_indirect
)
561 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
562 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
563 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
564 LLVMBuilderRef builder
= gallivm
->builder
;
567 ptr
= get_pointer_into_array(ctx
, file
, swizzle
, reg_index
, reg_indirect
);
569 LLVMValueRef val
= LLVMBuildLoad(builder
, ptr
, "");
570 if (tgsi_type_is_64bit(type
)) {
571 LLVMValueRef ptr_hi
, val_hi
;
572 ptr_hi
= LLVMBuildGEP(builder
, ptr
, &bld_base
->uint_bld
.one
, 1, "");
573 val_hi
= LLVMBuildLoad(builder
, ptr_hi
, "");
574 val
= si_llvm_emit_fetch_64bit(bld_base
, type
, val
, val_hi
);
579 struct tgsi_declaration_range range
=
580 get_array_range(bld_base
, file
, reg_index
, reg_indirect
);
582 emit_array_index(bld
, reg_indirect
, reg_index
- range
.First
);
584 emit_array_fetch(bld_base
, file
, type
, range
, swizzle
);
585 return LLVMBuildExtractElement(builder
, array
, index
, "");
590 store_value_to_array(struct lp_build_tgsi_context
*bld_base
,
595 const struct tgsi_ind_register
*reg_indirect
)
597 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
598 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
599 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
600 LLVMBuilderRef builder
= gallivm
->builder
;
603 ptr
= get_pointer_into_array(ctx
, file
, chan_index
, reg_index
, reg_indirect
);
605 LLVMBuildStore(builder
, value
, ptr
);
608 struct tgsi_declaration_range range
= get_array_range(bld_base
, file
, reg_index
, reg_indirect
);
609 LLVMValueRef index
= emit_array_index(bld
, reg_indirect
, reg_index
- range
.First
);
611 emit_array_fetch(bld_base
, file
, TGSI_TYPE_FLOAT
, range
, chan_index
);
612 LLVMValueRef temp_ptr
;
614 array
= LLVMBuildInsertElement(builder
, array
, value
, index
, "");
616 size
= range
.Last
- range
.First
+ 1;
617 for (i
= 0; i
< size
; ++i
) {
619 case TGSI_FILE_OUTPUT
:
620 temp_ptr
= bld
->outputs
[i
+ range
.First
][chan_index
];
623 case TGSI_FILE_TEMPORARY
:
624 if (range
.First
+ i
>= ctx
->temps_count
)
626 temp_ptr
= ctx
->temps
[(i
+ range
.First
) * TGSI_NUM_CHANNELS
+ chan_index
];
632 value
= LLVMBuildExtractElement(builder
, array
,
633 lp_build_const_int32(gallivm
, i
), "");
634 LLVMBuildStore(builder
, value
, temp_ptr
);
639 /* If this is true, preload FS inputs at the beginning of shaders. Otherwise,
640 * reload them at each use. This must be true if the shader is using
641 * derivatives, because all inputs should be loaded in the WQM mode.
643 static bool si_preload_fs_inputs(struct si_shader_context
*ctx
)
645 return ctx
->shader
->selector
->info
.uses_derivatives
;
648 LLVMValueRef
si_llvm_emit_fetch(struct lp_build_tgsi_context
*bld_base
,
649 const struct tgsi_full_src_register
*reg
,
650 enum tgsi_opcode_type type
,
653 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
654 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
655 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
656 LLVMValueRef result
= NULL
, ptr
, ptr2
;
659 LLVMValueRef values
[TGSI_NUM_CHANNELS
];
661 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; chan
++) {
662 values
[chan
] = si_llvm_emit_fetch(bld_base
, reg
, type
, chan
);
664 return lp_build_gather_values(bld_base
->base
.gallivm
, values
,
668 if (reg
->Register
.Indirect
) {
669 LLVMValueRef load
= load_value_from_array(bld_base
, reg
->Register
.File
, type
,
670 swizzle
, reg
->Register
.Index
, ®
->Indirect
);
671 return bitcast(bld_base
, type
, load
);
674 switch(reg
->Register
.File
) {
675 case TGSI_FILE_IMMEDIATE
: {
676 LLVMTypeRef ctype
= tgsi2llvmtype(bld_base
, type
);
677 if (tgsi_type_is_64bit(type
)) {
678 result
= LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base
->base
.gallivm
->context
, 32), bld_base
->base
.type
.length
* 2));
679 result
= LLVMConstInsertElement(result
,
680 ctx
->imms
[reg
->Register
.Index
* TGSI_NUM_CHANNELS
+ swizzle
],
681 bld_base
->int_bld
.zero
);
682 result
= LLVMConstInsertElement(result
,
683 ctx
->imms
[reg
->Register
.Index
* TGSI_NUM_CHANNELS
+ swizzle
+ 1],
684 bld_base
->int_bld
.one
);
685 return LLVMConstBitCast(result
, ctype
);
687 return LLVMConstBitCast(ctx
->imms
[reg
->Register
.Index
* TGSI_NUM_CHANNELS
+ swizzle
], ctype
);
691 case TGSI_FILE_INPUT
: {
692 unsigned index
= reg
->Register
.Index
;
693 LLVMValueRef input
[4];
695 /* I don't think doing this for vertex shaders is beneficial.
696 * For those, we want to make sure the VMEM loads are executed
697 * only once. Fragment shaders don't care much, because
698 * v_interp instructions are much cheaper than VMEM loads.
700 if (!si_preload_fs_inputs(ctx
) &&
701 ctx
->soa
.bld_base
.info
->processor
== PIPE_SHADER_FRAGMENT
)
702 ctx
->load_input(ctx
, index
, &ctx
->input_decls
[index
], input
);
704 memcpy(input
, &ctx
->inputs
[index
* 4], sizeof(input
));
706 result
= input
[swizzle
];
708 if (tgsi_type_is_64bit(type
)) {
710 ptr2
= input
[swizzle
+ 1];
711 return si_llvm_emit_fetch_64bit(bld_base
, type
, ptr
, ptr2
);
716 case TGSI_FILE_TEMPORARY
:
717 if (reg
->Register
.Index
>= ctx
->temps_count
)
718 return LLVMGetUndef(tgsi2llvmtype(bld_base
, type
));
719 ptr
= ctx
->temps
[reg
->Register
.Index
* TGSI_NUM_CHANNELS
+ swizzle
];
720 if (tgsi_type_is_64bit(type
)) {
721 ptr2
= ctx
->temps
[reg
->Register
.Index
* TGSI_NUM_CHANNELS
+ swizzle
+ 1];
722 return si_llvm_emit_fetch_64bit(bld_base
, type
,
723 LLVMBuildLoad(builder
, ptr
, ""),
724 LLVMBuildLoad(builder
, ptr2
, ""));
726 result
= LLVMBuildLoad(builder
, ptr
, "");
729 case TGSI_FILE_OUTPUT
:
730 ptr
= lp_get_output_ptr(bld
, reg
->Register
.Index
, swizzle
);
731 if (tgsi_type_is_64bit(type
)) {
732 ptr2
= lp_get_output_ptr(bld
, reg
->Register
.Index
, swizzle
+ 1);
733 return si_llvm_emit_fetch_64bit(bld_base
, type
,
734 LLVMBuildLoad(builder
, ptr
, ""),
735 LLVMBuildLoad(builder
, ptr2
, ""));
737 result
= LLVMBuildLoad(builder
, ptr
, "");
741 return LLVMGetUndef(tgsi2llvmtype(bld_base
, type
));
744 return bitcast(bld_base
, type
, result
);
747 static LLVMValueRef
fetch_system_value(struct lp_build_tgsi_context
*bld_base
,
748 const struct tgsi_full_src_register
*reg
,
749 enum tgsi_opcode_type type
,
752 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
753 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
755 LLVMValueRef cval
= ctx
->system_values
[reg
->Register
.Index
];
756 if (LLVMGetTypeKind(LLVMTypeOf(cval
)) == LLVMVectorTypeKind
) {
757 cval
= LLVMBuildExtractElement(gallivm
->builder
, cval
,
758 lp_build_const_int32(gallivm
, swizzle
), "");
760 return bitcast(bld_base
, type
, cval
);
763 static void emit_declaration(struct lp_build_tgsi_context
*bld_base
,
764 const struct tgsi_full_declaration
*decl
)
766 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
767 LLVMBuilderRef builder
= bld_base
->base
.gallivm
->builder
;
768 unsigned first
, last
, i
;
769 switch(decl
->Declaration
.File
) {
770 case TGSI_FILE_ADDRESS
:
773 for (idx
= decl
->Range
.First
; idx
<= decl
->Range
.Last
; idx
++) {
775 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; chan
++) {
776 ctx
->soa
.addr
[idx
][chan
] = lp_build_alloca_undef(
778 ctx
->soa
.bld_base
.uint_bld
.elem_type
, "");
784 case TGSI_FILE_TEMPORARY
:
787 LLVMValueRef array_alloca
= NULL
;
789 unsigned writemask
= decl
->Declaration
.UsageMask
;
790 first
= decl
->Range
.First
;
791 last
= decl
->Range
.Last
;
792 decl_size
= 4 * ((last
- first
) + 1);
794 if (decl
->Declaration
.Array
) {
795 unsigned id
= decl
->Array
.ArrayID
- 1;
798 writemask
&= ctx
->temp_arrays
[id
].writemask
;
799 ctx
->temp_arrays
[id
].writemask
= writemask
;
800 array_size
= ((last
- first
) + 1) * util_bitcount(writemask
);
802 /* If the array has more than 16 elements, store it
803 * in memory using an alloca that spans the entire
806 * Otherwise, store each array element individually.
807 * We will then generate vectors (per-channel, up to
808 * <16 x float> if the usagemask is a single bit) for
809 * indirect addressing.
811 * Note that 16 is the number of vector elements that
812 * LLVM will store in a register, so theoretically an
813 * array with up to 4 * 16 = 64 elements could be
814 * handled this way, but whether that's a good idea
815 * depends on VGPR register pressure elsewhere.
817 * FIXME: We shouldn't need to have the non-alloca
818 * code path for arrays. LLVM should be smart enough to
819 * promote allocas into registers when profitable.
821 * LLVM 3.8 crashes with this.
823 if (HAVE_LLVM
>= 0x0309 && array_size
> 16) {
824 array_alloca
= LLVMBuildAlloca(builder
,
825 LLVMArrayType(bld_base
->base
.vec_type
,
826 array_size
), "array");
827 ctx
->temp_array_allocas
[id
] = array_alloca
;
831 if (!ctx
->temps_count
) {
832 ctx
->temps_count
= bld_base
->info
->file_max
[TGSI_FILE_TEMPORARY
] + 1;
833 ctx
->temps
= MALLOC(TGSI_NUM_CHANNELS
* ctx
->temps_count
* sizeof(LLVMValueRef
));
836 for (i
= 0; i
< decl_size
; ++i
) {
838 snprintf(name
, sizeof(name
), "TEMP%d.%c",
839 first
+ i
/ 4, "xyzw"[i
% 4]);
841 ctx
->temps
[first
* TGSI_NUM_CHANNELS
+ i
] =
842 lp_build_alloca_undef(bld_base
->base
.gallivm
,
843 bld_base
->base
.vec_type
,
847 LLVMValueRef idxs
[2] = {
848 bld_base
->uint_bld
.zero
,
853 if (writemask
!= TGSI_WRITEMASK_XYZW
&&
854 !ctx
->undef_alloca
) {
855 /* Create a dummy alloca. We use it so that we
856 * have a pointer that is safe to load from if
857 * a shader ever reads from a channel that
858 * it never writes to.
860 ctx
->undef_alloca
= lp_build_alloca_undef(
861 bld_base
->base
.gallivm
,
862 bld_base
->base
.vec_type
, "undef");
865 for (i
= 0; i
< decl_size
; ++i
) {
867 if (writemask
& (1 << (i
% 4))) {
869 snprintf(name
, sizeof(name
), "TEMP%d.%c",
870 first
+ i
/ 4, "xyzw"[i
% 4]);
872 idxs
[1] = lp_build_const_int32(bld_base
->base
.gallivm
, j
);
873 ptr
= LLVMBuildGEP(builder
, array_alloca
, idxs
, 2, name
);
876 ptr
= ctx
->undef_alloca
;
878 ctx
->temps
[first
* TGSI_NUM_CHANNELS
+ i
] = ptr
;
883 case TGSI_FILE_INPUT
:
886 for (idx
= decl
->Range
.First
; idx
<= decl
->Range
.Last
; idx
++) {
887 if (ctx
->load_input
&&
888 ctx
->input_decls
[idx
].Declaration
.File
!= TGSI_FILE_INPUT
) {
889 ctx
->input_decls
[idx
] = *decl
;
890 ctx
->input_decls
[idx
].Range
.First
= idx
;
891 ctx
->input_decls
[idx
].Range
.Last
= idx
;
892 ctx
->input_decls
[idx
].Semantic
.Index
+= idx
- decl
->Range
.First
;
894 if (si_preload_fs_inputs(ctx
) ||
895 bld_base
->info
->processor
!= PIPE_SHADER_FRAGMENT
)
896 ctx
->load_input(ctx
, idx
, &ctx
->input_decls
[idx
],
897 &ctx
->inputs
[idx
* 4]);
903 case TGSI_FILE_SYSTEM_VALUE
:
906 for (idx
= decl
->Range
.First
; idx
<= decl
->Range
.Last
; idx
++) {
907 ctx
->load_system_value(ctx
, idx
, decl
);
912 case TGSI_FILE_OUTPUT
:
916 for (idx
= decl
->Range
.First
; idx
<= decl
->Range
.Last
; idx
++) {
918 assert(idx
< RADEON_LLVM_MAX_OUTPUTS
);
919 if (ctx
->soa
.outputs
[idx
][0])
921 for (chan
= 0; chan
< TGSI_NUM_CHANNELS
; chan
++) {
923 snprintf(name
, sizeof(name
), "OUT%d.%c",
924 idx
, "xyzw"[chan
% 4]);
926 ctx
->soa
.outputs
[idx
][chan
] = lp_build_alloca_undef(
928 ctx
->soa
.bld_base
.base
.elem_type
, name
);
934 case TGSI_FILE_MEMORY
:
935 ctx
->declare_memory_region(ctx
, decl
);
943 LLVMValueRef
si_llvm_saturate(struct lp_build_tgsi_context
*bld_base
,
946 struct lp_build_emit_data clamp_emit_data
;
948 memset(&clamp_emit_data
, 0, sizeof(clamp_emit_data
));
949 clamp_emit_data
.arg_count
= 3;
950 clamp_emit_data
.args
[0] = value
;
951 clamp_emit_data
.args
[2] = bld_base
->base
.one
;
952 clamp_emit_data
.args
[1] = bld_base
->base
.zero
;
954 return lp_build_emit_llvm(bld_base
, TGSI_OPCODE_CLAMP
,
958 void si_llvm_emit_store(struct lp_build_tgsi_context
*bld_base
,
959 const struct tgsi_full_instruction
*inst
,
960 const struct tgsi_opcode_info
*info
,
963 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
964 struct lp_build_tgsi_soa_context
*bld
= lp_soa_context(bld_base
);
965 struct gallivm_state
*gallivm
= bld
->bld_base
.base
.gallivm
;
966 const struct tgsi_full_dst_register
*reg
= &inst
->Dst
[0];
967 LLVMBuilderRef builder
= bld
->bld_base
.base
.gallivm
->builder
;
968 LLVMValueRef temp_ptr
, temp_ptr2
= NULL
;
969 unsigned chan
, chan_index
;
970 bool is_vec_store
= false;
971 enum tgsi_opcode_type dtype
= tgsi_opcode_infer_dst_type(inst
->Instruction
.Opcode
);
974 LLVMTypeKind k
= LLVMGetTypeKind(LLVMTypeOf(dst
[0]));
975 is_vec_store
= (k
== LLVMVectorTypeKind
);
979 LLVMValueRef values
[4] = {};
980 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst
, chan
) {
981 LLVMValueRef index
= lp_build_const_int32(gallivm
, chan
);
982 values
[chan
] = LLVMBuildExtractElement(gallivm
->builder
,
985 bld_base
->emit_store(bld_base
, inst
, info
, values
);
989 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst
, chan_index
) {
990 LLVMValueRef value
= dst
[chan_index
];
992 if (tgsi_type_is_64bit(dtype
) && (chan_index
== 1 || chan_index
== 3))
994 if (inst
->Instruction
.Saturate
)
995 value
= si_llvm_saturate(bld_base
, value
);
997 if (reg
->Register
.File
== TGSI_FILE_ADDRESS
) {
998 temp_ptr
= bld
->addr
[reg
->Register
.Index
][chan_index
];
999 LLVMBuildStore(builder
, value
, temp_ptr
);
1003 if (!tgsi_type_is_64bit(dtype
))
1004 value
= bitcast(bld_base
, TGSI_TYPE_FLOAT
, value
);
1006 if (reg
->Register
.Indirect
) {
1007 unsigned file
= reg
->Register
.File
;
1008 unsigned reg_index
= reg
->Register
.Index
;
1009 store_value_to_array(bld_base
, value
, file
, chan_index
,
1010 reg_index
, ®
->Indirect
);
1012 switch(reg
->Register
.File
) {
1013 case TGSI_FILE_OUTPUT
:
1014 temp_ptr
= bld
->outputs
[reg
->Register
.Index
][chan_index
];
1015 if (tgsi_type_is_64bit(dtype
))
1016 temp_ptr2
= bld
->outputs
[reg
->Register
.Index
][chan_index
+ 1];
1019 case TGSI_FILE_TEMPORARY
:
1021 if (reg
->Register
.Index
>= ctx
->temps_count
)
1024 temp_ptr
= ctx
->temps
[ TGSI_NUM_CHANNELS
* reg
->Register
.Index
+ chan_index
];
1025 if (tgsi_type_is_64bit(dtype
))
1026 temp_ptr2
= ctx
->temps
[ TGSI_NUM_CHANNELS
* reg
->Register
.Index
+ chan_index
+ 1];
1033 if (!tgsi_type_is_64bit(dtype
))
1034 LLVMBuildStore(builder
, value
, temp_ptr
);
1036 LLVMValueRef ptr
= LLVMBuildBitCast(builder
, value
,
1037 LLVMVectorType(LLVMIntTypeInContext(bld_base
->base
.gallivm
->context
, 32), 2), "");
1039 value
= LLVMBuildExtractElement(builder
, ptr
,
1040 bld_base
->uint_bld
.zero
, "");
1041 val2
= LLVMBuildExtractElement(builder
, ptr
,
1042 bld_base
->uint_bld
.one
, "");
1044 LLVMBuildStore(builder
, bitcast(bld_base
, TGSI_TYPE_FLOAT
, value
), temp_ptr
);
1045 LLVMBuildStore(builder
, bitcast(bld_base
, TGSI_TYPE_FLOAT
, val2
), temp_ptr2
);
1051 static void set_basicblock_name(LLVMBasicBlockRef bb
, const char *base
, int pc
)
1054 /* Subtract 1 so that the number shown is that of the corresponding
1055 * opcode in the TGSI dump, e.g. an if block has the same suffix as
1056 * the instruction number of the corresponding TGSI IF.
1058 snprintf(buf
, sizeof(buf
), "%s%d", base
, pc
- 1);
1059 LLVMSetValueName(LLVMBasicBlockAsValue(bb
), buf
);
1062 /* Append a basic block at the level of the parent flow.
1064 static LLVMBasicBlockRef
append_basic_block(struct si_shader_context
*ctx
,
1067 struct gallivm_state
*gallivm
= &ctx
->gallivm
;
1069 assert(ctx
->flow_depth
>= 1);
1071 if (ctx
->flow_depth
>= 2) {
1072 struct si_llvm_flow
*flow
= &ctx
->flow
[ctx
->flow_depth
- 2];
1074 return LLVMInsertBasicBlockInContext(gallivm
->context
,
1075 flow
->next_block
, name
);
1078 return LLVMAppendBasicBlockInContext(gallivm
->context
, ctx
->main_fn
, name
);
1081 /* Emit a branch to the given default target for the current block if
1082 * applicable -- that is, if the current block does not already contain a
1083 * branch from a break or continue.
1085 static void emit_default_branch(LLVMBuilderRef builder
, LLVMBasicBlockRef target
)
1087 if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder
)))
1088 LLVMBuildBr(builder
, target
);
1091 static void bgnloop_emit(const struct lp_build_tgsi_action
*action
,
1092 struct lp_build_tgsi_context
*bld_base
,
1093 struct lp_build_emit_data
*emit_data
)
1095 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1096 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1097 struct si_llvm_flow
*flow
= push_flow(ctx
);
1098 flow
->loop_entry_block
= append_basic_block(ctx
, "LOOP");
1099 flow
->next_block
= append_basic_block(ctx
, "ENDLOOP");
1100 set_basicblock_name(flow
->loop_entry_block
, "loop", bld_base
->pc
);
1101 LLVMBuildBr(gallivm
->builder
, flow
->loop_entry_block
);
1102 LLVMPositionBuilderAtEnd(gallivm
->builder
, flow
->loop_entry_block
);
1105 static void brk_emit(const struct lp_build_tgsi_action
*action
,
1106 struct lp_build_tgsi_context
*bld_base
,
1107 struct lp_build_emit_data
*emit_data
)
1109 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1110 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1111 struct si_llvm_flow
*flow
= get_innermost_loop(ctx
);
1113 LLVMBuildBr(gallivm
->builder
, flow
->next_block
);
1116 static void cont_emit(const struct lp_build_tgsi_action
*action
,
1117 struct lp_build_tgsi_context
*bld_base
,
1118 struct lp_build_emit_data
*emit_data
)
1120 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1121 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1122 struct si_llvm_flow
*flow
= get_innermost_loop(ctx
);
1124 LLVMBuildBr(gallivm
->builder
, flow
->loop_entry_block
);
1127 static void else_emit(const struct lp_build_tgsi_action
*action
,
1128 struct lp_build_tgsi_context
*bld_base
,
1129 struct lp_build_emit_data
*emit_data
)
1131 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1132 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1133 struct si_llvm_flow
*current_branch
= get_current_flow(ctx
);
1134 LLVMBasicBlockRef endif_block
;
1136 assert(!current_branch
->loop_entry_block
);
1138 endif_block
= append_basic_block(ctx
, "ENDIF");
1139 emit_default_branch(gallivm
->builder
, endif_block
);
1141 LLVMPositionBuilderAtEnd(gallivm
->builder
, current_branch
->next_block
);
1142 set_basicblock_name(current_branch
->next_block
, "else", bld_base
->pc
);
1144 current_branch
->next_block
= endif_block
;
1147 static void endif_emit(const struct lp_build_tgsi_action
*action
,
1148 struct lp_build_tgsi_context
*bld_base
,
1149 struct lp_build_emit_data
*emit_data
)
1151 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1152 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1153 struct si_llvm_flow
*current_branch
= get_current_flow(ctx
);
1155 assert(!current_branch
->loop_entry_block
);
1157 emit_default_branch(gallivm
->builder
, current_branch
->next_block
);
1158 LLVMPositionBuilderAtEnd(gallivm
->builder
, current_branch
->next_block
);
1159 set_basicblock_name(current_branch
->next_block
, "endif", bld_base
->pc
);
1164 static void endloop_emit(const struct lp_build_tgsi_action
*action
,
1165 struct lp_build_tgsi_context
*bld_base
,
1166 struct lp_build_emit_data
*emit_data
)
1168 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1169 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1170 struct si_llvm_flow
*current_loop
= get_current_flow(ctx
);
1172 assert(current_loop
->loop_entry_block
);
1174 emit_default_branch(gallivm
->builder
, current_loop
->loop_entry_block
);
1176 LLVMPositionBuilderAtEnd(gallivm
->builder
, current_loop
->next_block
);
1177 set_basicblock_name(current_loop
->next_block
, "endloop", bld_base
->pc
);
1181 static void if_cond_emit(const struct lp_build_tgsi_action
*action
,
1182 struct lp_build_tgsi_context
*bld_base
,
1183 struct lp_build_emit_data
*emit_data
,
1186 struct si_shader_context
*ctx
= si_shader_context(bld_base
);
1187 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1188 struct si_llvm_flow
*flow
= push_flow(ctx
);
1189 LLVMBasicBlockRef if_block
;
1191 if_block
= append_basic_block(ctx
, "IF");
1192 flow
->next_block
= append_basic_block(ctx
, "ELSE");
1193 set_basicblock_name(if_block
, "if", bld_base
->pc
);
1194 LLVMBuildCondBr(gallivm
->builder
, cond
, if_block
, flow
->next_block
);
1195 LLVMPositionBuilderAtEnd(gallivm
->builder
, if_block
);
1198 static void if_emit(const struct lp_build_tgsi_action
*action
,
1199 struct lp_build_tgsi_context
*bld_base
,
1200 struct lp_build_emit_data
*emit_data
)
1202 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1205 cond
= LLVMBuildFCmp(gallivm
->builder
, LLVMRealUNE
,
1207 bld_base
->base
.zero
, "");
1209 if_cond_emit(action
, bld_base
, emit_data
, cond
);
1212 static void uif_emit(const struct lp_build_tgsi_action
*action
,
1213 struct lp_build_tgsi_context
*bld_base
,
1214 struct lp_build_emit_data
*emit_data
)
1216 struct gallivm_state
*gallivm
= bld_base
->base
.gallivm
;
1219 cond
= LLVMBuildICmp(gallivm
->builder
, LLVMIntNE
,
1220 bitcast(bld_base
, TGSI_TYPE_UNSIGNED
, emit_data
->args
[0]),
1221 bld_base
->int_bld
.zero
, "");
1223 if_cond_emit(action
, bld_base
, emit_data
, cond
);
/* lp_build_tgsi_context::emit_immediate hook: record the four channels of a
 * TGSI immediate as 32-bit integer constants in ctx->imms and advance the
 * immediate counter.
 *
 * The raw bit pattern is stored (uint element type, no sign extension);
 * consumers bitcast the constant to whatever type the instruction needs.
 */
static void emit_immediate(struct lp_build_tgsi_context *bld_base,
			   const struct tgsi_full_immediate *imm)
{
	unsigned i;
	struct si_shader_context *ctx = si_shader_context(bld_base);

	for (i = 0; i < 4; ++i) {
		/* Immediates are stored as consecutive groups of
		 * TGSI_NUM_CHANNELS constants, indexed by imms_num. */
		ctx->imms[ctx->imms_num * TGSI_NUM_CHANNELS + i] =
				LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false);
	}

	/* NOTE(review): the counter increment below sits outside the visible
	 * fragment; it is required for the indexing above to advance per
	 * immediate -- confirm against upstream. */
	ctx->imms_num++;
}
/* Initialize a si_shader_context for translating one shader.
 *
 * Creates a fresh LLVM context/module/builder (stored in ctx->gallivm and
 * mirrored into ctx->ac), scans the TGSI tokens for indirectly addressed
 * temporary arrays and counts immediates, initializes the gallivm build
 * contexts for each element type, and installs the TGSI store/fetch/opcode
 * hooks implemented in this file.
 *
 * "info" and "tokens" may be NULL (no TGSI shader); scanning is skipped
 * in that case.
 *
 * NOTE(review): "tm" appears unused in this function -- verify against the
 * declaration in si_shader_internal.h and the callers.
 */
void si_llvm_context_init(struct si_shader_context *ctx,
			  struct si_screen *sscreen,
			  struct si_shader *shader,
			  LLVMTargetMachineRef tm,
			  const struct tgsi_shader_info *info,
			  const struct tgsi_token *tokens)
{
	struct lp_type type;

	/* Initialize the gallivm object:
	 * We are only using the module, context, and builder fields of this struct.
	 * This should be enough for us to be able to pass our gallivm struct to the
	 * helper functions in the gallivm module.
	 */
	memset(ctx, 0, sizeof(*ctx));
	ctx->shader = shader;
	ctx->screen = sscreen;

	/* -1 when there is no TGSI info (non-TGSI shaders). */
	ctx->type = info ? info->processor : -1;

	ctx->gallivm.context = LLVMContextCreate();
	ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
						ctx->gallivm.context);
	LLVMSetTarget(ctx->gallivm.module, "amdgcn--");

	bool unsafe_fpmath = (sscreen->b.debug_flags & DBG_UNSAFE_MATH) != 0;
	ctx->gallivm.builder = lp_create_builder(ctx->gallivm.context,
						 unsafe_fpmath);

	/* ctx->ac shares the LLVM objects with ctx->gallivm. */
	ac_llvm_context_init(&ctx->ac, ctx->gallivm.context);
	ctx->ac.module = ctx->gallivm.module;
	ctx->ac.builder = ctx->gallivm.builder;

	struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;

	bld_base->info = info;

	if (info && info->array_max[TGSI_FILE_TEMPORARY] > 0) {
		int size = info->array_max[TGSI_FILE_TEMPORARY];

		ctx->temp_arrays = CALLOC(size, sizeof(ctx->temp_arrays[0]));
		ctx->temp_array_allocas = CALLOC(size, sizeof(ctx->temp_array_allocas[0]));

		if (tokens)
			tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, size,
					 ctx->temp_arrays);
	}

	if (info && info->file_max[TGSI_FILE_IMMEDIATE] >= 0) {
		/* One group of TGSI_NUM_CHANNELS constants per immediate. */
		int size = info->file_max[TGSI_FILE_IMMEDIATE] + 1;
		ctx->imms = MALLOC(size * TGSI_NUM_CHANNELS * sizeof(LLVMValueRef));
	}

	/* 32-bit scalar float is the base type for TGSI expansion.
	 * NOTE(review): the non-"floating" field initializers sit in a gap of
	 * the visible fragment -- confirm against upstream. */
	type.floating = true;
	type.fixed = false;
	type.sign = true;
	type.norm = false;
	type.width = 32;
	type.length = 1;

	lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
	lp_build_context_init(&ctx->soa.bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
	lp_build_context_init(&ctx->soa.bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
	/* 64-bit variants of the same build contexts. */
	type.width *= 2;
	lp_build_context_init(&ctx->soa.bld_base.dbl_bld, &ctx->gallivm, type);
	lp_build_context_init(&ctx->soa.bld_base.uint64_bld, &ctx->gallivm, lp_uint_type(type));
	lp_build_context_init(&ctx->soa.bld_base.int64_bld, &ctx->gallivm, lp_int_type(type));

	bld_base->soa = 1;
	bld_base->emit_store = si_llvm_emit_store;
	bld_base->emit_swizzle = emit_swizzle;
	bld_base->emit_declaration = emit_declaration;
	bld_base->emit_immediate = emit_immediate;

	bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = si_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = si_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = si_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = si_llvm_emit_fetch;
	bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;

	/* metadata allowing 2.5 ULP */
	ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->gallivm.context,
						       "fpmath", 6);
	LLVMValueRef arg = lp_build_const_float(&ctx->gallivm, 2.5);
	ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->gallivm.context,
						     &arg, 1);

	/* Allocate outputs */
	ctx->soa.outputs = ctx->outputs;

	/* Structured control-flow opcodes handled in this file. */
	bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
	bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
	bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
	bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
	bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
	bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
	bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;

	si_shader_context_init_alu(&ctx->soa.bld_base);

	/* Commonly used LLVM types, cached for convenience. */
	ctx->voidt = LLVMVoidTypeInContext(ctx->gallivm.context);
	ctx->i1 = LLVMInt1TypeInContext(ctx->gallivm.context);
	ctx->i8 = LLVMInt8TypeInContext(ctx->gallivm.context);
	ctx->i32 = LLVMInt32TypeInContext(ctx->gallivm.context);
	ctx->i64 = LLVMInt64TypeInContext(ctx->gallivm.context);
	ctx->i128 = LLVMIntTypeInContext(ctx->gallivm.context, 128);
	ctx->f32 = LLVMFloatTypeInContext(ctx->gallivm.context);
	ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
	ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
	ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
	ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
	ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
}
/* Create the shader's main LLVM function and position the builder at the
 * start of its entry block.
 *
 * If num_return_elems > 0 the function returns a packed struct of
 * return_types; otherwise it returns void. The function is stored in
 * ctx->main_fn and its return type in ctx->return_type so epilog code can
 * build the return value later.
 */
void si_llvm_create_func(struct si_shader_context *ctx,
			 const char *name,
			 LLVMTypeRef *return_types, unsigned num_return_elems,
			 LLVMTypeRef *ParamTypes, unsigned ParamCount)
{
	LLVMTypeRef main_fn_type, ret_type;
	LLVMBasicBlockRef main_fn_body;

	if (num_return_elems)
		/* "true" = packed struct. */
		ret_type = LLVMStructTypeInContext(ctx->gallivm.context,
						   return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx->gallivm.context);

	/* Setup the function */
	ctx->return_type = ret_type;
	main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
	ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, name, main_fn_type);
	main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
			ctx->main_fn, "main_body");
	LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
}
/* Run the final LLVM IR optimization passes over the module, then release
 * the builder and the pass manager.
 *
 * NOTE(review): the second parameter and the guard around the verifier pass
 * are reconstructed -- verify the name against si_shader_internal.h. When
 * true, the IR verifier runs before optimization so malformed IR is caught
 * early.
 */
void si_llvm_finalize_module(struct si_shader_context *ctx,
			     bool run_verifier)
{
	struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
	const char *triple = LLVMGetTarget(gallivm->module);
	LLVMTargetLibraryInfoRef target_library_info;

	/* Create the pass manager */
	gallivm->passmgr = LLVMCreatePassManager();

	target_library_info = gallivm_create_target_library_info(triple);
	LLVMAddTargetLibraryInfo(target_library_info, gallivm->passmgr);

	if (run_verifier)
		LLVMAddVerifierPass(gallivm->passmgr);

	LLVMAddAlwaysInlinerPass(gallivm->passmgr);

	/* This pass should eliminate all the load and store instructions */
	LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);

	/* Add some optimization passes */
	LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
	LLVMAddLICMPass(gallivm->passmgr);
	LLVMAddAggressiveDCEPass(gallivm->passmgr);
	LLVMAddCFGSimplificationPass(gallivm->passmgr);
	LLVMAddInstructionCombiningPass(gallivm->passmgr);

	/* Run the pass */
	LLVMRunPassManager(gallivm->passmgr, ctx->gallivm.module);

	/* The builder is no longer needed once the IR is final. */
	LLVMDisposeBuilder(gallivm->builder);
	LLVMDisposePassManager(gallivm->passmgr);
	gallivm_dispose_target_library_info(target_library_info);
}
1415 void si_llvm_dispose(struct si_shader_context
*ctx
)
1417 LLVMDisposeModule(ctx
->soa
.bld_base
.base
.gallivm
->module
);
1418 LLVMContextDispose(ctx
->soa
.bld_base
.base
.gallivm
->context
);
1419 FREE(ctx
->temp_arrays
);
1420 ctx
->temp_arrays
= NULL
;
1421 FREE(ctx
->temp_array_allocas
);
1422 ctx
->temp_array_allocas
= NULL
;
1425 ctx
->temps_count
= 0;
1431 ctx
->flow_depth_max
= 0;