#include "util/u_cpu_detect.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
-#include "util/u_simple_list.h"
+#include "util/simple_list.h"
+#include "util/os_time.h"
#include "lp_bld.h"
#include "lp_bld_debug.h"
#include "lp_bld_misc.h"
#include "lp_bld_init.h"
+#include <llvm/Config/llvm-config.h>
#include <llvm-c/Analysis.h>
#include <llvm-c/Transforms/Scalar.h>
+#if LLVM_VERSION_MAJOR >= 7
+#include <llvm-c/Transforms/Utils.h>
+#endif
#include <llvm-c/BitWriter.h>
-
-
-/**
- * AVX is supported in:
- * - standard JIT from LLVM 3.2 onwards
- * - MC-JIT from LLVM 3.1
- * - MC-JIT supports limited OSes (MacOSX and Linux)
- * - standard JIT in LLVM 3.1, with backports
- */
-#if defined(PIPE_ARCH_PPC_64) || defined(PIPE_ARCH_S390) || defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
-# define USE_MCJIT 1
-# define HAVE_AVX 0
-#elif HAVE_LLVM >= 0x0302 || (HAVE_LLVM == 0x0301 && defined(HAVE_JIT_AVX_SUPPORT))
-# define USE_MCJIT 0
-# define HAVE_AVX 1
-#elif HAVE_LLVM == 0x0301 && (defined(PIPE_OS_LINUX) || defined(PIPE_OS_APPLE))
-# define USE_MCJIT 1
-# define HAVE_AVX 1
-#else
-# define USE_MCJIT 0
-# define HAVE_AVX 0
+#if GALLIVM_HAVE_CORO
+#if LLVM_VERSION_MAJOR <= 8 && (defined(PIPE_ARCH_AARCH64) || defined (PIPE_ARCH_ARM) || defined(PIPE_ARCH_S390))
+#include <llvm-c/Transforms/IPO.h>
#endif
-
-
-#if USE_MCJIT
-void LLVMLinkInMCJIT();
+#include <llvm-c/Transforms/Coroutines.h>
#endif
+unsigned gallivm_perf = 0;
+
+static const struct debug_named_value lp_bld_perf_flags[] = {
+ { "no_brilinear", GALLIVM_PERF_NO_BRILINEAR, "disable brilinear optimization" },
+ { "no_rho_approx", GALLIVM_PERF_NO_RHO_APPROX, "disable rho_approx optimization" },
+ { "no_quad_lod", GALLIVM_PERF_NO_QUAD_LOD, "disable quad_lod optimization" },
+ { "no_aos_sampling", GALLIVM_PERF_NO_AOS_SAMPLING, "disable aos sampling optimization" },
+ { "nopt", GALLIVM_PERF_NO_OPT, "disable optimization passes to speed up shader compilation" },
+ { "no_filter_hacks", GALLIVM_PERF_NO_BRILINEAR | GALLIVM_PERF_NO_RHO_APPROX |
+ GALLIVM_PERF_NO_QUAD_LOD, "disable filter optimization hacks" },
+ DEBUG_NAMED_VALUE_END
+};
#ifdef DEBUG
unsigned gallivm_debug = 0;
{ "tgsi", GALLIVM_DEBUG_TGSI, NULL },
{ "ir", GALLIVM_DEBUG_IR, NULL },
{ "asm", GALLIVM_DEBUG_ASM, NULL },
- { "nopt", GALLIVM_DEBUG_NO_OPT, NULL },
{ "perf", GALLIVM_DEBUG_PERF, NULL },
- { "no_brilinear", GALLIVM_DEBUG_NO_BRILINEAR, NULL },
- { "no_rho_approx", GALLIVM_DEBUG_NO_RHO_APPROX, NULL },
{ "gc", GALLIVM_DEBUG_GC, NULL },
+ { "dumpbc", GALLIVM_DEBUG_DUMP_BC, NULL },
DEBUG_NAMED_VALUE_END
};
* See also CodeGenOpt::Level in llvm/Target/TargetMachine.h
*/
enum LLVM_CodeGenOpt_Level {
-#if HAVE_LLVM >= 0x207
None, // -O0
Less, // -O1
Default, // -O2, -Os
Aggressive // -O3
-#else
- Default,
- None,
- Aggressive
-#endif
};
-#if HAVE_LLVM <= 0x0206
-/**
- * LLVM 2.6 permits only one ExecutionEngine to be created. So use the
- * same gallivm state everywhere.
- */
-static struct gallivm_state *GlobalGallivm = NULL;
-#endif
-
-
/**
* Create the LLVM (optimization) pass manager and install
* relevant optimization passes.
assert(!gallivm->passmgr);
assert(gallivm->target);
- gallivm->passmgr = LLVMCreateFunctionPassManager(gallivm->provider);
+ gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(gallivm->module);
if (!gallivm->passmgr)
return FALSE;
- LLVMAddTargetData(gallivm->target, gallivm->passmgr);
+#if GALLIVM_HAVE_CORO
+ gallivm->cgpassmgr = LLVMCreatePassManager();
+#endif
+ /*
+ * TODO: some per module pass manager with IPO passes might be helpful -
+ * the generated texture functions may benefit from inlining if they are
+ * simple, or constant propagation into them, etc.
+ */
+
+ {
+ char *td_str;
+      // Copy the target's data layout string onto the newly created module.
+ td_str = LLVMCopyStringRepOfTargetData(gallivm->target);
+ LLVMSetDataLayout(gallivm->module, td_str);
+ free(td_str);
+ }
- if ((gallivm_debug & GALLIVM_DEBUG_NO_OPT) == 0) {
- /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
- * but there are more on SVN.
- * TODO: Add more passes.
+#if GALLIVM_HAVE_CORO
+#if LLVM_VERSION_MAJOR <= 8 && (defined(PIPE_ARCH_AARCH64) || defined (PIPE_ARCH_ARM) || defined(PIPE_ARCH_S390))
+ LLVMAddArgumentPromotionPass(gallivm->cgpassmgr);
+ LLVMAddFunctionAttrsPass(gallivm->cgpassmgr);
+#endif
+ LLVMAddCoroEarlyPass(gallivm->cgpassmgr);
+ LLVMAddCoroSplitPass(gallivm->cgpassmgr);
+ LLVMAddCoroElidePass(gallivm->cgpassmgr);
+#endif
+
+ if ((gallivm_perf & GALLIVM_PERF_NO_OPT) == 0) {
+ /*
+ * TODO: Evaluate passes some more - keeping in mind
+ * both quality of generated code and compile times.
+ */
+ /*
+ * NOTE: if you change this, don't forget to change the output
+ * with GALLIVM_DEBUG_DUMP_BC in gallivm_compile_module.
*/
LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
- LLVMAddLICMPass(gallivm->passmgr);
+ LLVMAddEarlyCSEPass(gallivm->passmgr);
LLVMAddCFGSimplificationPass(gallivm->passmgr);
+ /*
+ * FIXME: LICM is potentially quite useful. However, for some
+ * rather crazy shaders the compile time can reach _hours_ per shader,
+ * due to licm implying lcssa (since llvm 3.5), which can take forever.
+ * Even for sane shaders, the cost of licm is rather high (and not just
+ * due to lcssa, licm itself too), though mostly only in cases when it
+ * can actually move things, so having to disable it is a pity.
+ * LLVMAddLICMPass(gallivm->passmgr);
+ */
LLVMAddReassociatePass(gallivm->passmgr);
-
- if (HAVE_LLVM >= 0x207 && sizeof(void*) == 4) {
- /* For LLVM >= 2.7 and 32-bit build, use this order of passes to
- * avoid generating bad code.
- * Test with piglit glsl-vs-sqrt-zero test.
- */
- LLVMAddConstantPropagationPass(gallivm->passmgr);
- LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
- }
- else {
- LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
- LLVMAddConstantPropagationPass(gallivm->passmgr);
- }
-
- if (util_cpu_caps.has_sse4_1) {
- /* FIXME: There is a bug in this pass, whereby the combination
- * of fptosi and sitofp (necessary for trunc/floor/ceil/round
- * implementation) somehow becomes invalid code.
- */
- LLVMAddInstructionCombiningPass(gallivm->passmgr);
- }
+ LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
+#if LLVM_VERSION_MAJOR <= 11
+ LLVMAddConstantPropagationPass(gallivm->passmgr);
+#endif
+ LLVMAddInstructionCombiningPass(gallivm->passmgr);
LLVMAddGVNPass(gallivm->passmgr);
}
else {
*/
LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
}
+#if GALLIVM_HAVE_CORO
+ LLVMAddCoroCleanupPass(gallivm->passmgr);
+#endif
return TRUE;
}
/**
- * Free gallivm object's LLVM allocations, but not the gallivm object itself.
+ * Free gallivm object's LLVM allocations, but not any generated code
+ * nor the gallivm object itself.
*/
-static void
-free_gallivm_state(struct gallivm_state *gallivm)
+void
+gallivm_free_ir(struct gallivm_state *gallivm)
{
-#if HAVE_LLVM >= 0x207 /* XXX or 0x208? */
- /* This leads to crashes w/ some versions of LLVM */
- LLVMModuleRef mod;
- char *error;
-
- if (gallivm->engine && gallivm->provider)
- LLVMRemoveModuleProvider(gallivm->engine, gallivm->provider,
- &mod, &error);
-#endif
-
if (gallivm->passmgr) {
LLVMDisposePassManager(gallivm->passmgr);
}
-#if 0
- /* XXX this seems to crash with all versions of LLVM */
- if (gallivm->provider)
- LLVMDisposeModuleProvider(gallivm->provider);
+#if GALLIVM_HAVE_CORO
+ if (gallivm->cgpassmgr) {
+ LLVMDisposePassManager(gallivm->cgpassmgr);
+ }
#endif
- if (HAVE_LLVM >= 0x207 && gallivm->engine) {
+ if (gallivm->engine) {
/* This will already destroy any associated module */
LLVMDisposeExecutionEngine(gallivm->engine);
- } else {
+ } else if (gallivm->module) {
LLVMDisposeModule(gallivm->module);
}
-#if !USE_MCJIT
- /* Don't free the TargetData, it's owned by the exec engine */
-#else
+ if (gallivm->cache) {
+ lp_free_objcache(gallivm->cache->jit_obj_cache);
+ free(gallivm->cache->data);
+ }
+ FREE(gallivm->module_name);
+
if (gallivm->target) {
LLVMDisposeTargetData(gallivm->target);
}
-#endif
-
- /* Never free the LLVM context.
- */
-#if 0
- if (gallivm->context)
- LLVMContextDispose(gallivm->context);
-#endif
if (gallivm->builder)
LLVMDisposeBuilder(gallivm->builder);
+ /* The LLVMContext should be owned by the parent of gallivm. */
+
gallivm->engine = NULL;
gallivm->target = NULL;
gallivm->module = NULL;
- gallivm->provider = NULL;
+ gallivm->module_name = NULL;
+ gallivm->cgpassmgr = NULL;
gallivm->passmgr = NULL;
gallivm->context = NULL;
gallivm->builder = NULL;
+ gallivm->cache = NULL;
+}
+
+
+/**
+ * Free LLVM-generated code. Should be done AFTER gallivm_free_ir().
+ */
+static void
+gallivm_free_code(struct gallivm_state *gallivm)
+{
+ assert(!gallivm->module);
+ assert(!gallivm->engine);
+ lp_free_generated_code(gallivm->code);
+ gallivm->code = NULL;
+ lp_free_memory_manager(gallivm->memorymgr);
+ gallivm->memorymgr = NULL;
}
init_gallivm_engine(struct gallivm_state *gallivm)
{
if (1) {
- /* We can only create one LLVMExecutionEngine (w/ LLVM 2.6 anyway) */
enum LLVM_CodeGenOpt_Level optlevel;
char *error = NULL;
int ret;
- if (gallivm_debug & GALLIVM_DEBUG_NO_OPT) {
+ if (gallivm_perf & GALLIVM_PERF_NO_OPT) {
optlevel = None;
}
else {
optlevel = Default;
}
-#if HAVE_LLVM >= 0x0301
ret = lp_build_create_jit_compiler_for_module(&gallivm->engine,
+ &gallivm->code,
+ gallivm->cache,
gallivm->module,
+ gallivm->memorymgr,
(unsigned) optlevel,
- USE_MCJIT,
&error);
-#else
- ret = LLVMCreateJITCompiler(&gallivm->engine, gallivm->provider,
- (unsigned) optlevel, &error);
-#endif
if (ret) {
_debug_printf("%s\n", error);
LLVMDisposeMessage(error);
}
}
- LLVMAddModuleProvider(gallivm->engine, gallivm->provider);//new
-
-#if !USE_MCJIT
- gallivm->target = LLVMGetExecutionEngineTargetData(gallivm->engine);
- if (!gallivm->target)
- goto fail;
-#else
if (0) {
/*
* Dump the data layout strings.
free(data_layout);
free(engine_data_layout);
}
-#endif
return TRUE;
}
-/**
- * Singleton
- *
- * We must never free LLVM contexts, because LLVM has several global caches
- * which pointing/derived from objects owned by the context, causing false
- * memory leaks and false cache hits when these objects are destroyed.
- *
- * TODO: For thread safety on multi-threaded OpenGL we should use one LLVM
- * context per thread, and put them in a pool when threads are destroyed.
- */
-static LLVMContextRef gallivm_context = NULL;
-
-
/**
* Allocate gallivm LLVM objects.
* \return TRUE for success, FALSE for failure
*/
static boolean
-init_gallivm_state(struct gallivm_state *gallivm)
+init_gallivm_state(struct gallivm_state *gallivm, const char *name,
+ LLVMContextRef context, struct lp_cached_code *cache)
{
assert(!gallivm->context);
assert(!gallivm->module);
- assert(!gallivm->provider);
- lp_build_init();
+ if (!lp_build_init())
+ return FALSE;
- if (!gallivm_context) {
- gallivm_context = LLVMContextCreate();
- }
- gallivm->context = gallivm_context;
+ gallivm->context = context;
+ gallivm->cache = cache;
if (!gallivm->context)
goto fail;
- gallivm->module = LLVMModuleCreateWithNameInContext("gallivm",
+ gallivm->module_name = NULL;
+ if (name) {
+ size_t size = strlen(name) + 1;
+ gallivm->module_name = MALLOC(size);
+ if (gallivm->module_name) {
+ memcpy(gallivm->module_name, name, size);
+ }
+ }
+
+ gallivm->module = LLVMModuleCreateWithNameInContext(name,
gallivm->context);
if (!gallivm->module)
goto fail;
- gallivm->provider =
- LLVMCreateModuleProviderForExistingModule(gallivm->module);
- if (!gallivm->provider)
- goto fail;
-
gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
if (!gallivm->builder)
goto fail;
+ gallivm->memorymgr = lp_get_default_memory_manager();
+ if (!gallivm->memorymgr)
+ goto fail;
+
/* FIXME: MC-JIT only allows compiling one module at a time, and it must be
* complete when MC-JIT is created. So defer the MC-JIT engine creation for
* now.
*/
-#if !USE_MCJIT
- if (!init_gallivm_engine(gallivm)) {
- goto fail;
- }
-#else
+
/*
* MC-JIT engine compiles the module immediately on creation, so we can't
* obtain the target data from it. Instead we create a target data layout
{
const unsigned pointer_size = 8 * sizeof(void *);
char layout[512];
- util_snprintf(layout, sizeof layout, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
-#ifdef PIPE_ARCH_LITTLE_ENDIAN
+ snprintf(layout, sizeof layout, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
+#if UTIL_ARCH_LITTLE_ENDIAN
'e', // little endian
#else
'E', // big endian
return FALSE;
}
}
-#endif
if (!create_pass_manager(gallivm))
goto fail;
return TRUE;
fail:
- free_gallivm_state(gallivm);
+ gallivm_free_ir(gallivm);
+ gallivm_free_code(gallivm);
return FALSE;
}
-void
+boolean
lp_build_init(void)
{
if (gallivm_initialized)
- return;
+ return TRUE;
+
+
+ /* LLVMLinkIn* are no-ops at runtime. They just ensure the respective
+ * component is linked at buildtime, which is sufficient for its static
+ * constructors to be called at load time.
+ */
+ LLVMLinkInMCJIT();
#ifdef DEBUG
gallivm_debug = debug_get_option_gallivm_debug();
#endif
- lp_set_target_options();
+ gallivm_perf = debug_get_flags_option("GALLIVM_PERF", lp_bld_perf_flags, 0 );
-#if USE_MCJIT
- LLVMLinkInMCJIT();
-#else
- LLVMLinkInJIT();
-#endif
+ lp_set_target_options();
util_cpu_detect();
- /* AMD Bulldozer AVX's throughput is the same as SSE2; and because using
- * 8-wide vector needs more floating ops than 4-wide (due to padding), it is
- * actually more efficient to use 4-wide vectors on this processor.
- *
- * See also:
- * - http://www.anandtech.com/show/4955/the-bulldozer-review-amd-fx8150-tested/2
- */
- if (HAVE_AVX &&
- util_cpu_caps.has_avx &&
- util_cpu_caps.has_intel) {
+ /* For simulating less capable machines */
+#ifdef DEBUG
+ if (debug_get_bool_option("LP_FORCE_SSE2", FALSE)) {
+ assert(util_cpu_caps.has_sse2);
+ util_cpu_caps.has_sse3 = 0;
+ util_cpu_caps.has_ssse3 = 0;
+ util_cpu_caps.has_sse4_1 = 0;
+ util_cpu_caps.has_sse4_2 = 0;
+ util_cpu_caps.has_avx = 0;
+ util_cpu_caps.has_avx2 = 0;
+ util_cpu_caps.has_f16c = 0;
+ util_cpu_caps.has_fma = 0;
+ }
+#endif
+
+ if (util_cpu_caps.has_avx2 || util_cpu_caps.has_avx) {
lp_native_vector_width = 256;
} else {
/* Leave it at 128, even when no SIMD extensions are available.
*/
lp_native_vector_width = 128;
}
-
+
lp_native_vector_width = debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
lp_native_vector_width);
+#if LLVM_VERSION_MAJOR < 4
if (lp_native_vector_width <= 128) {
- /* Hide AVX support, as often LLVM AVX instrinsics are only guarded by
+ /* Hide AVX support, as often LLVM AVX intrinsics are only guarded by
* "util_cpu_caps.has_avx" predicate, and lack the
* "lp_native_vector_width > 128" predicate. And also to ensure a more
* consistent behavior, allowing one to test SSE2 on AVX machines.
+ * XXX: should not play games with util_cpu_caps directly as it might
+ * get used for other things outside llvm too.
*/
util_cpu_caps.has_avx = 0;
- }
-
- if (!HAVE_AVX) {
- /*
- * note these instructions are VEX-only, so can only emit if we use
- * avx (don't want to base it on has_avx & has_f16c later as that would
- * omit it unnecessarily on amd cpus, see above).
- */
+ util_cpu_caps.has_avx2 = 0;
util_cpu_caps.has_f16c = 0;
+ util_cpu_caps.has_fma = 0;
}
+#endif
#ifdef PIPE_ARCH_PPC_64
/* Set the NJ bit in VSCR to 0 so denormalized values are handled as
- * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This garantees
+ * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This guarantees
* that some rounding and half-float to float handling does not round
* incorrectly to 0.
+ * XXX: should eventually follow same logic on all platforms.
+ * Right now denorms get explicitly disabled (but elsewhere) for x86,
+ * whereas ppc64 explicitly enables them...
*/
if (util_cpu_caps.has_altivec) {
unsigned short mask[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
gallivm_initialized = TRUE;
-#if 0
- /* For simulating less capable machines */
- util_cpu_caps.has_sse3 = 0;
- util_cpu_caps.has_ssse3 = 0;
- util_cpu_caps.has_sse4_1 = 0;
- util_cpu_caps.has_avx = 0;
- util_cpu_caps.has_f16c = 0;
-#endif
+ return TRUE;
}
/**
* Create a new gallivm_state object.
- * Note that we return a singleton.
*/
struct gallivm_state *
-gallivm_create(void)
+gallivm_create(const char *name, LLVMContextRef context,
+ struct lp_cached_code *cache)
{
struct gallivm_state *gallivm;
-#if HAVE_LLVM <= 0x206
- if (GlobalGallivm) {
- return GlobalGallivm;
- }
-#endif
-
gallivm = CALLOC_STRUCT(gallivm_state);
if (gallivm) {
- if (!init_gallivm_state(gallivm)) {
+ if (!init_gallivm_state(gallivm, name, context, cache)) {
FREE(gallivm);
gallivm = NULL;
}
}
-#if HAVE_LLVM <= 0x206
- GlobalGallivm = gallivm;
-#endif
-
+ assert(gallivm != NULL);
return gallivm;
}
void
gallivm_destroy(struct gallivm_state *gallivm)
{
-#if HAVE_LLVM <= 0x0206
- /* No-op: don't destroy the singleton */
- (void) gallivm;
-#else
- free_gallivm_state(gallivm);
+ gallivm_free_ir(gallivm);
+ gallivm_free_code(gallivm);
FREE(gallivm);
-#endif
-}
-
-
-/**
- * Validate and optimze a function.
- */
-static void
-gallivm_optimize_function(struct gallivm_state *gallivm,
- LLVMValueRef func)
-{
- if (0) {
- debug_printf("optimizing %s...\n", LLVMGetValueName(func));
- }
-
- assert(gallivm->passmgr);
-
- /* Apply optimizations to LLVM IR */
- LLVMRunFunctionPassManager(gallivm->passmgr, func);
-
- if (0) {
- if (gallivm_debug & GALLIVM_DEBUG_IR) {
- /* Print the LLVM IR to stderr */
- lp_debug_dump_value(func);
- debug_printf("\n");
- }
- }
}
/**
* Validate a function.
+ * Verification is only done with debug builds.
*/
void
gallivm_verify_function(struct gallivm_state *gallivm,
}
#endif
- gallivm_optimize_function(gallivm, func);
-
if (gallivm_debug & GALLIVM_DEBUG_IR) {
/* Print the LLVM IR to stderr */
lp_debug_dump_value(func);
}
+/**
+ * Compile a module.
+ * This does IR optimization on all functions in the module.
+ */
void
gallivm_compile_module(struct gallivm_state *gallivm)
{
-#if HAVE_LLVM > 0x206
+ LLVMValueRef func;
+ int64_t time_begin = 0;
+
assert(!gallivm->compiled);
+
+ if (gallivm->builder) {
+ LLVMDisposeBuilder(gallivm->builder);
+ gallivm->builder = NULL;
+ }
+
+ if (gallivm->cache && gallivm->cache->data_size) {
+ goto skip_cached;
+ }
+
+ /* Dump bitcode to a file */
+ if (gallivm_debug & GALLIVM_DEBUG_DUMP_BC) {
+ char filename[256];
+ assert(gallivm->module_name);
+ snprintf(filename, sizeof(filename), "ir_%s.bc", gallivm->module_name);
+ LLVMWriteBitcodeToFile(gallivm->module, filename);
+ debug_printf("%s written\n", filename);
+ debug_printf("Invoke as \"opt %s %s | llc -O%d %s%s\"\n",
+                   gallivm_perf & GALLIVM_PERF_NO_OPT ? "-mem2reg" :
+ "-sroa -early-cse -simplifycfg -reassociate "
+ "-mem2reg -constprop -instcombine -gvn",
+                   filename, gallivm_perf & GALLIVM_PERF_NO_OPT ? 0 : 2,
+ "[-mcpu=<-mcpu option>] ",
+ "[-mattr=<-mattr option(s)>]");
+ }
+
+ if (gallivm_debug & GALLIVM_DEBUG_PERF)
+ time_begin = os_time_get();
+
+#if GALLIVM_HAVE_CORO
+ LLVMRunPassManager(gallivm->cgpassmgr, gallivm->module);
#endif
+ /* Run optimization passes */
+ LLVMInitializeFunctionPassManager(gallivm->passmgr);
+ func = LLVMGetFirstFunction(gallivm->module);
+ while (func) {
+ if (0) {
+ debug_printf("optimizing func %s...\n", LLVMGetValueName(func));
+ }
- /* Dump byte code to a file */
- if (0) {
- LLVMWriteBitcodeToFile(gallivm->module, "llvmpipe.bc");
- debug_printf("llvmpipe.bc written\n");
- debug_printf("Invoke as \"llc -o - llvmpipe.bc\"\n");
+ /* Disable frame pointer omission on debug/profile builds */
+ /* XXX: And workaround http://llvm.org/PR21435 */
+#if defined(DEBUG) || defined(PROFILE) || defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ LLVMAddTargetDependentFunctionAttr(func, "no-frame-pointer-elim", "true");
+ LLVMAddTargetDependentFunctionAttr(func, "no-frame-pointer-elim-non-leaf", "true");
+#endif
+
+ LLVMRunFunctionPassManager(gallivm->passmgr, func);
+ func = LLVMGetNextFunction(func);
+ }
+ LLVMFinalizeFunctionPassManager(gallivm->passmgr);
+
+ if (gallivm_debug & GALLIVM_DEBUG_PERF) {
+ int64_t time_end = os_time_get();
+ int time_msec = (int)((time_end - time_begin) / 1000);
+ assert(gallivm->module_name);
+ debug_printf("optimizing module %s took %d msec\n",
+ gallivm->module_name, time_msec);
}
-#if USE_MCJIT
+ /* Setting the module's DataLayout to an empty string will cause the
+ * ExecutionEngine to copy to the DataLayout string from its target machine
+ * to the module. As of LLVM 3.8 the module and the execution engine are
+ * required to have the same DataLayout.
+ *
+ * We must make sure we do this after running the optimization passes,
+ * because those passes need a correct datalayout string. For example, if
+ * those optimization passes see an empty datalayout, they will assume this
+ * is a little endian target and will do optimizations that break big endian
+ * machines.
+ *
+ * TODO: This is just a temporary work-around. The correct solution is for
+    * init_gallivm_state() to create a TargetMachine and pull the DataLayout
+ * from there. Currently, the TargetMachine used by llvmpipe is being
+ * implicitly created by the EngineBuilder in
+ * lp_build_create_jit_compiler_for_module()
+ */
+ skip_cached:
+ LLVMSetDataLayout(gallivm->module, "");
assert(!gallivm->engine);
if (!init_gallivm_engine(gallivm)) {
assert(0);
}
-#endif
assert(gallivm->engine);
++gallivm->compiled;
+
+ if (gallivm->debug_printf_hook)
+ LLVMAddGlobalMapping(gallivm->engine, gallivm->debug_printf_hook, debug_printf);
+
+ if (gallivm_debug & GALLIVM_DEBUG_ASM) {
+ LLVMValueRef llvm_func = LLVMGetFirstFunction(gallivm->module);
+
+ while (llvm_func) {
+ /*
+ * Need to filter out functions which don't have an implementation,
+ * such as the intrinsics. May not be sufficient in case of IPO?
+ * LLVMGetPointerToGlobal() will abort otherwise.
+ */
+ if (!LLVMIsDeclaration(llvm_func)) {
+ void *func_code = LLVMGetPointerToGlobal(gallivm->engine, llvm_func);
+ lp_disassemble(llvm_func, func_code);
+ }
+ llvm_func = LLVMGetNextFunction(llvm_func);
+ }
+ }
+
+#if defined(PROFILE)
+ {
+ LLVMValueRef llvm_func = LLVMGetFirstFunction(gallivm->module);
+
+ while (llvm_func) {
+ if (!LLVMIsDeclaration(llvm_func)) {
+ void *func_code = LLVMGetPointerToGlobal(gallivm->engine, llvm_func);
+ lp_profile(llvm_func, func_code);
+ }
+ llvm_func = LLVMGetNextFunction(llvm_func);
+ }
+ }
+#endif
}
{
void *code;
func_pointer jit_func;
+ int64_t time_begin = 0;
assert(gallivm->compiled);
assert(gallivm->engine);
+ if (gallivm_debug & GALLIVM_DEBUG_PERF)
+ time_begin = os_time_get();
+
code = LLVMGetPointerToGlobal(gallivm->engine, func);
assert(code);
jit_func = pointer_to_func(code);
- if (gallivm_debug & GALLIVM_DEBUG_ASM) {
- lp_disassemble(func, code);
+ if (gallivm_debug & GALLIVM_DEBUG_PERF) {
+ int64_t time_end = os_time_get();
+      int time_msec = (int)((time_end - time_begin) / 1000);
+ debug_printf(" jitting func %s took %d msec\n",
+ LLVMGetValueName(func), time_msec);
}
-#if defined(PROFILE)
- lp_profile(func, code);
-#endif
-
- /* Free the function body to save memory */
- lp_func_delete_body(func);
-
return jit_func;
}
-
-
-/**
- * Free the function (and its machine code).
- */
-void
-gallivm_free_function(struct gallivm_state *gallivm,
- LLVMValueRef func,
- const void *code)
-{
-#if !USE_MCJIT
- if (code) {
- LLVMFreeMachineCodeForFunction(gallivm->engine, func);
- }
-
- LLVMDeleteFunction(func);
-#endif
-}