1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "pipe/p_config.h"
30 #include "pipe/p_compiler.h"
31 #include "util/u_cpu_detect.h"
32 #include "util/u_debug.h"
33 #include "util/u_memory.h"
34 #include "util/simple_list.h"
35 #include "os/os_time.h"
37 #include "lp_bld_debug.h"
38 #include "lp_bld_misc.h"
39 #include "lp_bld_init.h"
41 #include <llvm-c/Analysis.h>
42 #include <llvm-c/Transforms/Scalar.h>
43 #include <llvm-c/BitWriter.h>
46 /* Only MCJIT is available as of LLVM SVN r216982 */
47 #if HAVE_LLVM >= 0x0306
49 #elif defined(PIPE_ARCH_PPC_64) || defined(PIPE_ARCH_S390) || defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
56 void LLVMLinkInMCJIT();
/* Runtime debug flags for gallivm: a bitmask of GALLIVM_DEBUG_* bits,
 * parsed once from the GALLIVM_DEBUG environment option (see below). */
60 unsigned gallivm_debug
= 0;
/* Maps GALLIVM_DEBUG option names to their corresponding flag bits. */
62 static const struct debug_named_value lp_bld_debug_flags
[] = {
63 { "tgsi", GALLIVM_DEBUG_TGSI
, NULL
},
64 { "ir", GALLIVM_DEBUG_IR
, NULL
},
65 { "asm", GALLIVM_DEBUG_ASM
, NULL
},
66 { "nopt", GALLIVM_DEBUG_NO_OPT
, NULL
},
67 { "perf", GALLIVM_DEBUG_PERF
, NULL
},
68 { "no_brilinear", GALLIVM_DEBUG_NO_BRILINEAR
, NULL
},
69 { "no_rho_approx", GALLIVM_DEBUG_NO_RHO_APPROX
, NULL
},
70 { "no_quad_lod", GALLIVM_DEBUG_NO_QUAD_LOD
, NULL
},
71 { "gc", GALLIVM_DEBUG_GC
, NULL
},
/* Defines debug_get_option_gallivm_debug(): parses GALLIVM_DEBUG once,
 * using the table above, defaulting to 0 (no flags). */
75 DEBUG_GET_ONCE_FLAGS_OPTION(gallivm_debug
, "GALLIVM_DEBUG", lp_bld_debug_flags
, 0)
/* One-time guard so the global initialization below runs only once. */
79 static boolean gallivm_initialized
= FALSE
;
/* Native SIMD vector width in bits (e.g. 128 for SSE, 256 for AVX);
 * selected during one-time initialization below. */
81 unsigned lp_native_vector_width
;
/* JIT code generation optimization level; values mirror
 * llvm::CodeGenOpt::Level (see reference below). */
85 * Optimization values are:
88 * - 2: Default (-O2, -Os)
89 * - 3: Aggressive (-O3)
91 * See also CodeGenOpt::Level in llvm/Target/TargetMachine.h
93 enum LLVM_CodeGenOpt_Level
{
102 * Create the LLVM (optimization) pass manager and install
103 * relevant optimization passes.
104 * \return TRUE for success, FALSE for failure
107 create_pass_manager(struct gallivm_state
*gallivm
)
/* Guard against double initialization; target data must already exist. */
110 assert(!gallivm
->passmgr
);
111 assert(gallivm
->target
);
/* Function-level pass manager: the passes installed here are run
 * per-function later, in gallivm_compile_module(). */
113 gallivm
->passmgr
= LLVMCreateFunctionPassManagerForModule(gallivm
->module
);
114 if (!gallivm
->passmgr
)
117 * TODO: some per module pass manager with IPO passes might be helpful -
118 * the generated texture functions may benefit from inlining if they are
119 * simple, or constant propagation into them, etc.
122 // Old versions of LLVM get the DataLayout from the pass manager.
123 LLVMAddTargetData(gallivm
->target
, gallivm
->passmgr
);
125 // New ones from the Module.
126 td_str
= LLVMCopyStringRepOfTargetData(gallivm
->target
);
127 LLVMSetDataLayout(gallivm
->module
, td_str
);
/* Honor GALLIVM_DEBUG=nopt: install the optimization passes only when
 * optimization has not been disabled. */
130 if ((gallivm_debug
& GALLIVM_DEBUG_NO_OPT
) == 0) {
131 /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
132 * but there are more on SVN.
133 * TODO: Add more passes.
135 LLVMAddScalarReplAggregatesPass(gallivm
->passmgr
);
136 LLVMAddLICMPass(gallivm
->passmgr
);
137 LLVMAddCFGSimplificationPass(gallivm
->passmgr
);
138 LLVMAddReassociatePass(gallivm
->passmgr
);
139 LLVMAddPromoteMemoryToRegisterPass(gallivm
->passmgr
);
140 LLVMAddConstantPropagationPass(gallivm
->passmgr
);
141 LLVMAddInstructionCombiningPass(gallivm
->passmgr
);
142 LLVMAddGVNPass(gallivm
->passmgr
);
/* NOTE(review): presumably the no-opt branch — mem2reg alone is still
 * required so the backends don't fail (per the comment below); the
 * elided control flow should be confirmed. */
145 /* We need at least this pass to prevent the backends to fail in
148 LLVMAddPromoteMemoryToRegisterPass(gallivm
->passmgr
);
156 * Free gallivm object's LLVM allocations, but not any generated code
157 * nor the gallivm object itself.
160 gallivm_free_ir(struct gallivm_state
*gallivm
)
162 if (gallivm
->passmgr
) {
163 LLVMDisposePassManager(gallivm
->passmgr
);
/* Disposing the engine also disposes the module it owns; only dispose
 * the module directly when no engine was ever created. */
166 if (gallivm
->engine
) {
167 /* This will already destroy any associated module */
168 LLVMDisposeExecutionEngine(gallivm
->engine
);
169 } else if (gallivm
->module
) {
170 LLVMDisposeModule(gallivm
->module
);
174 /* Don't free the TargetData, it's owned by the exec engine */
/* NOTE(review): the guard between the comment above and this dispose is
 * elided — presumably the MC-JIT path creates its own target data (see
 * init_gallivm_state) and so must dispose it, while the old JIT's was
 * engine-owned; confirm. */
176 if (gallivm
->target
) {
177 LLVMDisposeTargetData(gallivm
->target
);
181 if (gallivm
->builder
)
182 LLVMDisposeBuilder(gallivm
->builder
);
184 /* The LLVMContext should be owned by the parent of gallivm. */
/* Null out everything so the struct can be reused and so that the
 * asserts in gallivm_free_code() hold afterwards. */
186 gallivm
->engine
= NULL
;
187 gallivm
->target
= NULL
;
188 gallivm
->module
= NULL
;
189 gallivm
->passmgr
= NULL
;
190 gallivm
->context
= NULL
;
191 gallivm
->builder
= NULL
;
196 * Free LLVM-generated code. Should be done AFTER gallivm_free_ir().
199 gallivm_free_code(struct gallivm_state
*gallivm
)
/* gallivm_free_ir() must have run already — it nulls module and engine. */
201 assert(!gallivm
->module
);
202 assert(!gallivm
->engine
);
/* Release the machine code and its JIT memory manager, and clear the
 * pointers so a double free is harmless. */
203 lp_free_generated_code(gallivm
->code
);
204 gallivm
->code
= NULL
;
205 lp_free_memory_manager(gallivm
->memorymgr
);
206 gallivm
->memorymgr
= NULL
;
/* Create the JIT execution engine for gallivm's module and adopt the
 * engine's target data.
 * NOTE(review): several statements (optlevel selection, error returns)
 * are elided in this excerpt; presumably returns TRUE/FALSE like the
 * other init helpers — confirm. */
211 init_gallivm_engine(struct gallivm_state
*gallivm
)
214 enum LLVM_CodeGenOpt_Level optlevel
;
/* NOTE(review): presumably picks a reduced optlevel when
 * GALLIVM_DEBUG=nopt is set — body elided; confirm. */
218 if (gallivm_debug
& GALLIVM_DEBUG_NO_OPT
) {
225 ret
= lp_build_create_jit_compiler_for_module(&gallivm
->engine
,
/* On failure, report and release the error string LLVM handed back. */
233 _debug_printf("%s\n", error
);
234 LLVMDisposeMessage(error
);
/* Adopt the engine's target data for all subsequent queries. */
240 gallivm
->target
= LLVMGetExecutionEngineTargetData(gallivm
->engine
);
241 if (!gallivm
->target
)
246 * Dump the data layout strings.
249 LLVMTargetDataRef target
= LLVMGetExecutionEngineTargetData(gallivm
->engine
);
251 char *engine_data_layout
;
253 data_layout
= LLVMCopyStringRepOfTargetData(gallivm
->target
);
254 engine_data_layout
= LLVMCopyStringRepOfTargetData(target
);
/* Printing both strings makes module/engine layout mismatches easy to
 * spot when debugging. */
257 debug_printf("module target data = %s\n", data_layout
);
258 debug_printf("engine target data = %s\n", engine_data_layout
);
262 free(engine_data_layout
);
274 * Allocate gallivm LLVM objects.
275 * \return TRUE for success, FALSE for failure
278 init_gallivm_state(struct gallivm_state
*gallivm
, const char *name
,
279 LLVMContextRef context
)
/* A gallivm_state must only be initialized once. */
281 assert(!gallivm
->context
);
282 assert(!gallivm
->module
);
/* Make sure the one-time global initialization has run first. */
284 if (!lp_build_init())
/* The context is supplied (and owned) by the caller. */
287 gallivm
->context
= context
;
289 if (!gallivm
->context
)
292 gallivm
->module
= LLVMModuleCreateWithNameInContext(name
,
294 if (!gallivm
->module
)
297 gallivm
->builder
= LLVMCreateBuilderInContext(gallivm
->context
);
298 if (!gallivm
->builder
)
/* JIT code memory manager; released later in gallivm_free_code(). */
301 gallivm
->memorymgr
= lp_get_default_memory_manager();
302 if (!gallivm
->memorymgr
)
305 /* FIXME: MC-JIT only allows compiling one module at a time, and it must be
306 * complete when MC-JIT is created. So defer the MC-JIT engine creation for
/* NOTE(review): presumably a non-MC-JIT path (engine creation is deferred
 * in the MC-JIT case, per the FIXME above) — the elided guards should be
 * confirmed. */
310 if (!init_gallivm_engine(gallivm
)) {
315 * MC-JIT engine compiles the module immediately on creation, so we can't
316 * obtain the target data from it. Instead we create a target data layout
319 * The produced layout strings are not precisely the same, but should make
320 * no difference for the kind of optimization passes we run.
322 * For reference this is the layout string on x64:
324 * e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64
327 * - http://llvm.org/docs/LangRef.html#datalayout
/* Build a best-guess data layout string from the host pointer size. */
331 const unsigned pointer_size
= 8 * sizeof(void *);
333 util_snprintf(layout
, sizeof layout
, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
334 #ifdef PIPE_ARCH_LITTLE_ENDIAN
335 'e', // little endian
339 pointer_size
, pointer_size
, pointer_size
, // pointer size, abi alignment, preferred alignment
340 pointer_size
, // aggregate preferred alignment
341 pointer_size
, pointer_size
); // stack objects abi alignment, preferred alignment
343 gallivm
->target
= LLVMCreateTargetData(layout
);
344 if (!gallivm
->target
) {
350 if (!create_pass_manager(gallivm
))
/* Error path: unwind everything allocated above. */
356 gallivm_free_ir(gallivm
);
357 gallivm_free_code(gallivm
);
/* NOTE(review): body of the one-time global initialization (the enclosing
 * function's signature is elided in this excerpt); guarded so it runs
 * only once per process. */
365 if (gallivm_initialized
)
/* Parse GALLIVM_DEBUG into the global debug flag mask. */
369 gallivm_debug
= debug_get_option_gallivm_debug();
372 lp_set_target_options();
382 /* AMD Bulldozer AVX's throughput is the same as SSE2; and because using
383 * 8-wide vector needs more floating ops than 4-wide (due to padding), it is
384 * actually more efficient to use 4-wide vectors on this processor.
387 * - http://www.anandtech.com/show/4955/the-bulldozer-review-amd-fx8150-tested/2
/* Hence: 256-bit vectors only on Intel CPUs with AVX. */
389 if (util_cpu_caps
.has_avx
&&
390 util_cpu_caps
.has_intel
) {
391 lp_native_vector_width
= 256;
393 /* Leave it at 128, even when no SIMD extensions are available.
394 * Really needs to be a multiple of 128 so can fit 4 floats.
396 lp_native_vector_width
= 128;
/* Allow an explicit override via the LP_NATIVE_VECTOR_WIDTH env var. */
399 lp_native_vector_width
= debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
400 lp_native_vector_width
);
402 if (lp_native_vector_width
<= 128) {
403 /* Hide AVX support, as often LLVM AVX intrinsics are only guarded by
404 * "util_cpu_caps.has_avx" predicate, and lack the
405 * "lp_native_vector_width > 128" predicate. And also to ensure a more
406 * consistent behavior, allowing one to test SSE2 on AVX machines.
407 * XXX: should not play games with util_cpu_caps directly as it might
408 * get used for other things outside llvm too.
410 util_cpu_caps
.has_avx
= 0;
411 util_cpu_caps
.has_avx2
= 0;
414 #ifdef PIPE_ARCH_PPC_64
415 /* Set the NJ bit in VSCR to 0 so denormalized values are handled as
416 * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This guarantees
417 * that some rounding and half-float to float handling does not round
419 * XXX: should eventually follow same logic on all platforms.
420 * Right now denorms get explicitly disabled (but elsewhere) for x86,
421 * whereas ppc64 explicitly enables them...
423 if (util_cpu_caps
.has_altivec
) {
/* All-ones mask except a single cleared bit (the NJ bit, per the
 * comment above). */
424 unsigned short mask
[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
425 0xFFFF, 0xFFFF, 0xFFFE, 0xFFFF };
436 gallivm_initialized
= TRUE
;
/* NOTE(review): these force-disable overrides are presumably inside a
 * debug-only/disabled guard (elided here) — confirm before assuming
 * they run unconditionally. */
439 /* For simulating less capable machines */
440 util_cpu_caps
.has_sse3
= 0;
441 util_cpu_caps
.has_ssse3
= 0;
442 util_cpu_caps
.has_sse4_1
= 0;
443 util_cpu_caps
.has_avx
= 0;
444 util_cpu_caps
.has_f16c
= 0;
453 * Create a new gallivm_state object.
455 struct gallivm_state
*
456 gallivm_create(const char *name
, LLVMContextRef context
)
458 struct gallivm_state
*gallivm
;
/* Zero-initialized, so partially-constructed state is safe to free. */
460 gallivm
= CALLOC_STRUCT(gallivm_state
);
/* NOTE(review): failure path elided — presumably frees gallivm and
 * returns NULL; confirm. */
462 if (!init_gallivm_state(gallivm
, name
, context
)) {
473 * Destroy a gallivm_state object.
476 gallivm_destroy(struct gallivm_state
*gallivm
)
/* Order matters: IR must be freed before the generated code (see the
 * comment on gallivm_free_code()). */
478 gallivm_free_ir(gallivm
);
479 gallivm_free_code(gallivm
);
485 * Validate a function.
486 * Verification is only done with debug builds.
489 gallivm_verify_function(struct gallivm_state
*gallivm
,
492 /* Verify the LLVM IR. If invalid, dump and abort */
494 if (LLVMVerifyFunction(func
, LLVMPrintMessageAction
)) {
495 lp_debug_dump_value(func
);
/* GALLIVM_DEBUG=ir: also dump the IR when it is valid. */
501 if (gallivm_debug
& GALLIVM_DEBUG_IR
) {
502 /* Print the LLVM IR to stderr */
503 lp_debug_dump_value(func
);
511 * This does IR optimization on all functions in the module.
514 gallivm_compile_module(struct gallivm_state
*gallivm
)
517 int64_t time_begin
= 0;
519 assert(!gallivm
->compiled
);
/* Dispose the builder before compiling; no IR should be added to the
 * module after this point. */
521 if (gallivm
->builder
) {
522 LLVMDisposeBuilder(gallivm
->builder
);
523 gallivm
->builder
= NULL
;
/* GALLIVM_DEBUG=perf: time the optimization phase. */
526 if (gallivm_debug
& GALLIVM_DEBUG_PERF
)
527 time_begin
= os_time_get();
529 /* Run optimization passes */
/* Iterate every function in the module through the per-function pass
 * manager built by create_pass_manager(). */
530 LLVMInitializeFunctionPassManager(gallivm
->passmgr
);
531 func
= LLVMGetFirstFunction(gallivm
->module
);
534 debug_printf("optimizing func %s...\n", LLVMGetValueName(func
));
536 LLVMRunFunctionPassManager(gallivm
->passmgr
, func
);
537 func
= LLVMGetNextFunction(func
);
539 LLVMFinalizeFunctionPassManager(gallivm
->passmgr
);
/* Report how long the per-function optimization took. */
541 if (gallivm_debug
& GALLIVM_DEBUG_PERF
) {
542 int64_t time_end
= os_time_get();
543 int time_msec
= (int)(time_end
- time_begin
) / 1000;
544 debug_printf("optimizing module %s took %d msec\n",
545 lp_get_module_id(gallivm
->module
), time_msec
);
548 /* Dump byte code to a file */
/* NOTE(review): presumably under a debug-only guard (elided here) —
 * confirm before assuming the bitcode dump always happens. */
550 LLVMWriteBitcodeToFile(gallivm
->module
, "llvmpipe.bc");
551 debug_printf("llvmpipe.bc written\n");
552 debug_printf("Invoke as \"llc -o - llvmpipe.bc\"\n");
/* Engine creation was deferred until the module is complete (MC-JIT
 * compiles the whole module on creation — see init_gallivm_state()). */
556 assert(!gallivm
->engine
);
557 if (!init_gallivm_engine(gallivm
)) {
561 assert(gallivm
->engine
);
/* GALLIVM_DEBUG=asm: disassemble every compiled function. */
565 if (gallivm_debug
& GALLIVM_DEBUG_ASM
) {
566 LLVMValueRef llvm_func
= LLVMGetFirstFunction(gallivm
->module
);
570 * Need to filter out functions which don't have an implementation,
571 * such as the intrinsics. May not be sufficient in case of IPO?
572 * LLVMGetPointerToGlobal() will abort otherwise.
574 if (!LLVMIsDeclaration(llvm_func
)) {
575 void *func_code
= LLVMGetPointerToGlobal(gallivm
->engine
, llvm_func
);
576 lp_disassemble(llvm_func
, func_code
);
578 llvm_func
= LLVMGetNextFunction(llvm_func
);
/* Second walk over the functions: register each defined function's code
 * with the profiler via lp_profile(). */
584 LLVMValueRef llvm_func
= LLVMGetFirstFunction(gallivm
->module
);
587 if (!LLVMIsDeclaration(llvm_func
)) {
588 void *func_code
= LLVMGetPointerToGlobal(gallivm
->engine
, llvm_func
);
589 lp_profile(llvm_func
, func_code
);
591 llvm_func
= LLVMGetNextFunction(llvm_func
);
600 gallivm_jit_function(struct gallivm_state
*gallivm
,
604 func_pointer jit_func
;
606 assert(gallivm
->compiled
);
607 assert(gallivm
->engine
);
609 code
= LLVMGetPointerToGlobal(gallivm
->engine
, func
);
611 jit_func
= pointer_to_func(code
);