gallivm: remove workaround for reversing optimization pass order.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_init.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "pipe/p_config.h"
30 #include "pipe/p_compiler.h"
31 #include "util/u_cpu_detect.h"
32 #include "util/u_debug.h"
33 #include "util/u_memory.h"
34 #include "util/u_simple_list.h"
35 #include "lp_bld.h"
36 #include "lp_bld_debug.h"
37 #include "lp_bld_misc.h"
38 #include "lp_bld_init.h"
39
40 #include <llvm-c/Analysis.h>
41 #include <llvm-c/Transforms/Scalar.h>
42 #include <llvm-c/BitWriter.h>
43
44
/**
 * AVX is supported in:
 * - standard JIT from LLVM 3.2 onwards
 * - MC-JIT from LLVM 3.1
 *   - MC-JIT supports limited OSes (MacOSX and Linux)
 * - standard JIT in LLVM 3.1, with backports
 */
#if defined(PIPE_ARCH_PPC_64) || defined(PIPE_ARCH_S390) || defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
/* Non-x86 targets: only MC-JIT is usable here, and AVX is an x86 feature. */
#  define USE_MCJIT 1
#  define HAVE_AVX 0
#elif HAVE_LLVM >= 0x0302 || (HAVE_LLVM == 0x0301 && defined(HAVE_JIT_AVX_SUPPORT))
#  define USE_MCJIT 0
#  define HAVE_AVX 1
#elif HAVE_LLVM == 0x0301 && (defined(PIPE_OS_LINUX) || defined(PIPE_OS_APPLE))
#  define USE_MCJIT 1
#  define HAVE_AVX 1
#else
#  define USE_MCJIT 0
#  define HAVE_AVX 0
#endif


#if USE_MCJIT
/* Not exposed by the llvm-c headers of these LLVM versions; declared here so
 * lp_build_init() can force the MC-JIT implementation to be linked in. */
void LLVMLinkInMCJIT();
#endif

/*
 * LLVM keeps several global caches that point at (or are derived from)
 * objects owned by a context, so freeing contexts causes memory leaks and
 * false cache hits when those objects are destroyed.  Hence we use the
 * global context and never dispose it.
 *
 * TODO: For thread safety on multi-threaded OpenGL we should use one LLVM
 * context per thread, and put them in a pool when threads are destroyed.
 */
#define USE_GLOBAL_CONTEXT 1
80
81
#ifdef DEBUG
/* Bitmask of GALLIVM_DEBUG_* flags, parsed once from the GALLIVM_DEBUG
 * environment variable (debug builds only). */
unsigned gallivm_debug = 0;

/* Mapping of GALLIVM_DEBUG option names to flag bits. */
static const struct debug_named_value lp_bld_debug_flags[] = {
   { "tgsi", GALLIVM_DEBUG_TGSI, NULL },
   { "ir", GALLIVM_DEBUG_IR, NULL },
   { "asm", GALLIVM_DEBUG_ASM, NULL },
   { "nopt", GALLIVM_DEBUG_NO_OPT, NULL },
   { "perf", GALLIVM_DEBUG_PERF, NULL },
   { "no_brilinear", GALLIVM_DEBUG_NO_BRILINEAR, NULL },
   { "no_rho_approx", GALLIVM_DEBUG_NO_RHO_APPROX, NULL },
   { "no_quad_lod", GALLIVM_DEBUG_NO_QUAD_LOD, NULL },
   { "gc", GALLIVM_DEBUG_GC, NULL },
   DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(gallivm_debug, "GALLIVM_DEBUG", lp_bld_debug_flags, 0)
#endif


/* Set to TRUE by lp_build_init() once global initialization has run. */
static boolean gallivm_initialized = FALSE;

/* SIMD vector width (in bits) targeted by code generation: 128 or 256,
 * overridable via LP_NATIVE_VECTOR_WIDTH (see lp_build_init()). */
unsigned lp_native_vector_width;


/*
 * Optimization values are:
 * - 0: None (-O0)
 * - 1: Less (-O1)
 * - 2: Default (-O2, -Os)
 * - 3: Aggressive (-O3)
 *
 * See also CodeGenOpt::Level in llvm/Target/TargetMachine.h
 */
enum LLVM_CodeGenOpt_Level {
   None,        // -O0
   Less,        // -O1
   Default,     // -O2, -Os
   Aggressive   // -O3
};
122
123
/**
 * Create the LLVM (optimization) pass manager and install
 * relevant optimization passes.
 *
 * The passes run per-function, in exactly the order they are added below.
 * Pass order is deliberate -- do not reorder casually.
 *
 * \return TRUE for success, FALSE for failure
 */
static boolean
create_pass_manager(struct gallivm_state *gallivm)
{
   assert(!gallivm->passmgr);
   assert(gallivm->target);

   gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(gallivm->module);
   if (!gallivm->passmgr)
      return FALSE;

   /* Make the target's data layout (pointer sizes, alignments) available
    * to the passes. */
   LLVMAddTargetData(gallivm->target, gallivm->passmgr);

   if ((gallivm_debug & GALLIVM_DEBUG_NO_OPT) == 0) {
      /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
       * but there are more on SVN.
       * TODO: Add more passes.
       */
      LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
      LLVMAddLICMPass(gallivm->passmgr);
      LLVMAddCFGSimplificationPass(gallivm->passmgr);
      LLVMAddReassociatePass(gallivm->passmgr);
      LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
      LLVMAddConstantPropagationPass(gallivm->passmgr);

      if (util_cpu_caps.has_sse4_1) {
         /* FIXME: There is a bug in this pass, whereby the combination
          * of fptosi and sitofp (necessary for trunc/floor/ceil/round
          * implementation) somehow becomes invalid code.
          * NOTE(review): presumably the pass is only added with SSE4.1
          * because rounding then avoids the fptosi/sitofp sequence --
          * confirm against lp_bld_arit before changing this guard.
          */
         LLVMAddInstructionCombiningPass(gallivm->passmgr);
      }
      LLVMAddGVNPass(gallivm->passmgr);
   }
   else {
      /* We need at least this pass to prevent the backends to fail in
       * unexpected ways.
       */
      LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
   }

   return TRUE;
}
171
172
/**
 * Free gallivm object's LLVM allocations, but not any generated code
 * nor the gallivm object itself.
 *
 * Destruction order matters: the pass manager goes first, and the
 * execution engine (when present) owns and destroys the module itself.
 */
void
gallivm_free_ir(struct gallivm_state *gallivm)
{
   if (gallivm->passmgr) {
      LLVMDisposePassManager(gallivm->passmgr);
   }

   if (gallivm->engine) {
      /* This will already destroy any associated module */
      LLVMDisposeExecutionEngine(gallivm->engine);
   } else if (gallivm->module) {
      LLVMDisposeModule(gallivm->module);
   }

#if !USE_MCJIT
   /* Don't free the TargetData, it's owned by the exec engine */
#else
   /* With MC-JIT the target data was created by init_gallivm_state(),
    * so it is ours to dispose. */
   if (gallivm->target) {
      LLVMDisposeTargetData(gallivm->target);
   }
#endif

   if (gallivm->builder)
      LLVMDisposeBuilder(gallivm->builder);

   /* The global context is shared and must never be disposed. */
   if (!USE_GLOBAL_CONTEXT && gallivm->context)
      LLVMContextDispose(gallivm->context);

   /* Clear all pointers so subsequent frees/re-inits see a clean state. */
   gallivm->engine = NULL;
   gallivm->target = NULL;
   gallivm->module = NULL;
   gallivm->passmgr = NULL;
   gallivm->context = NULL;
   gallivm->builder = NULL;
}
212
213
214 /**
215 * Free LLVM-generated code. Should be done AFTER gallivm_free_ir().
216 */
217 static void
218 gallivm_free_code(struct gallivm_state *gallivm)
219 {
220 assert(!gallivm->module);
221 assert(!gallivm->engine);
222 lp_free_generated_code(gallivm->code);
223 gallivm->code = NULL;
224 }
225
226
/**
 * Create the JIT (or MC-JIT) execution engine for gallivm->module.
 *
 * On success gallivm->engine is valid; with the old JIT, gallivm->target
 * is also (re)fetched from the engine.
 *
 * \return TRUE for success, FALSE for failure
 */
static boolean
init_gallivm_engine(struct gallivm_state *gallivm)
{
   if (1) {
      enum LLVM_CodeGenOpt_Level optlevel;
      char *error = NULL;
      int ret;

      /* GALLIVM_DEBUG=nopt disables codegen optimization too, not just
       * the IR passes. */
      if (gallivm_debug & GALLIVM_DEBUG_NO_OPT) {
         optlevel = None;
      }
      else {
         optlevel = Default;
      }

      ret = lp_build_create_jit_compiler_for_module(&gallivm->engine,
                                                    &gallivm->code,
                                                    gallivm->module,
                                                    (unsigned) optlevel,
                                                    USE_MCJIT,
                                                    &error);
      if (ret) {
         _debug_printf("%s\n", error);
         LLVMDisposeMessage(error);
         goto fail;
      }
   }

#if !USE_MCJIT
   /* The old JIT owns the target data; we merely borrow a reference
    * (see gallivm_free_ir(), which does not dispose it in this case). */
   gallivm->target = LLVMGetExecutionEngineTargetData(gallivm->engine);
   if (!gallivm->target)
      goto fail;
#else
   if (0) {
      /*
       * Debug aid (normally compiled out): dump and compare the data layout
       * string we synthesized in init_gallivm_state() against the engine's.
       */

      LLVMTargetDataRef target = LLVMGetExecutionEngineTargetData(gallivm->engine);
      char *data_layout;
      char *engine_data_layout;

      data_layout = LLVMCopyStringRepOfTargetData(gallivm->target);
      engine_data_layout = LLVMCopyStringRepOfTargetData(target);

      if (1) {
         debug_printf("module target data = %s\n", data_layout);
         debug_printf("engine target data = %s\n", engine_data_layout);
      }

      /* NOTE(review): strings from LLVMCopyStringRepOfTargetData should
       * arguably be released with LLVMDisposeMessage() rather than free();
       * harmless while LLVM allocates with malloc, but worth confirming. */
      free(data_layout);
      free(engine_data_layout);
   }
#endif

   return TRUE;

fail:
   return FALSE;
}
287
288
289 /**
290 * Allocate gallivm LLVM objects.
291 * \return TRUE for success, FALSE for failure
292 */
293 static boolean
294 init_gallivm_state(struct gallivm_state *gallivm)
295 {
296 assert(!gallivm->context);
297 assert(!gallivm->module);
298
299 lp_build_init();
300
301 if (USE_GLOBAL_CONTEXT) {
302 gallivm->context = LLVMGetGlobalContext();
303 } else {
304 gallivm->context = LLVMContextCreate();
305 }
306 if (!gallivm->context)
307 goto fail;
308
309 gallivm->module = LLVMModuleCreateWithNameInContext("gallivm",
310 gallivm->context);
311 if (!gallivm->module)
312 goto fail;
313
314 gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
315 if (!gallivm->builder)
316 goto fail;
317
318 /* FIXME: MC-JIT only allows compiling one module at a time, and it must be
319 * complete when MC-JIT is created. So defer the MC-JIT engine creation for
320 * now.
321 */
322 #if !USE_MCJIT
323 if (!init_gallivm_engine(gallivm)) {
324 goto fail;
325 }
326 #else
327 /*
328 * MC-JIT engine compiles the module immediately on creation, so we can't
329 * obtain the target data from it. Instead we create a target data layout
330 * from a string.
331 *
332 * The produced layout strings are not precisely the same, but should make
333 * no difference for the kind of optimization passes we run.
334 *
335 * For reference this is the layout string on x64:
336 *
337 * e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64
338 *
339 * See also:
340 * - http://llvm.org/docs/LangRef.html#datalayout
341 */
342
343 {
344 const unsigned pointer_size = 8 * sizeof(void *);
345 char layout[512];
346 util_snprintf(layout, sizeof layout, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
347 #ifdef PIPE_ARCH_LITTLE_ENDIAN
348 'e', // little endian
349 #else
350 'E', // big endian
351 #endif
352 pointer_size, pointer_size, pointer_size, // pointer size, abi alignment, preferred alignment
353 pointer_size, // aggregate preferred alignment
354 pointer_size, pointer_size); // stack objects abi alignment, preferred alignment
355
356 gallivm->target = LLVMCreateTargetData(layout);
357 if (!gallivm->target) {
358 return FALSE;
359 }
360 }
361 #endif
362
363 if (!create_pass_manager(gallivm))
364 goto fail;
365
366 return TRUE;
367
368 fail:
369 gallivm_free_ir(gallivm);
370 gallivm_free_code(gallivm);
371 return FALSE;
372 }
373
374
/**
 * One-time global initialization: set LLVM target options, link in the
 * appropriate JIT, detect CPU capabilities and choose the native vector
 * width.  Idempotent -- only the first call does any work.
 */
void
lp_build_init(void)
{
   if (gallivm_initialized)
      return;

#ifdef DEBUG
   gallivm_debug = debug_get_option_gallivm_debug();
#endif

   lp_set_target_options();

   /* Force the chosen JIT implementation to be linked into the binary. */
#if USE_MCJIT
   LLVMLinkInMCJIT();
#else
   LLVMLinkInJIT();
#endif

   util_cpu_detect();

   /* AMD Bulldozer AVX's throughput is the same as SSE2; and because using
    * 8-wide vector needs more floating ops than 4-wide (due to padding), it is
    * actually more efficient to use 4-wide vectors on this processor.
    *
    * See also:
    * - http://www.anandtech.com/show/4955/the-bulldozer-review-amd-fx8150-tested/2
    */
   if (HAVE_AVX &&
       util_cpu_caps.has_avx &&
       util_cpu_caps.has_intel) {
      lp_native_vector_width = 256;
   } else {
      /* Leave it at 128, even when no SIMD extensions are available.
       * Really needs to be a multiple of 128 so can fit 4 floats.
       */
      lp_native_vector_width = 128;
   }

   /* Allow overriding the computed width for testing/debugging. */
   lp_native_vector_width = debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
                                                 lp_native_vector_width);

   if (lp_native_vector_width <= 128) {
      /* Hide AVX support, as often LLVM AVX intrinsics are only guarded by
       * "util_cpu_caps.has_avx" predicate, and lack the
       * "lp_native_vector_width > 128" predicate. And also to ensure a more
       * consistent behavior, allowing one to test SSE2 on AVX machines.
       * XXX: should not play games with util_cpu_caps directly as it might
       * get used for other things outside llvm too.
       */
      util_cpu_caps.has_avx = 0;
      util_cpu_caps.has_avx2 = 0;
   }

   if (!HAVE_AVX) {
      /*
       * note these instructions are VEX-only, so can only emit if we use
       * avx (don't want to base it on has_avx & has_f16c later as that would
       * omit it unnecessarily on amd cpus, see above).
       */
      util_cpu_caps.has_f16c = 0;
      util_cpu_caps.has_xop = 0;
   }

#ifdef PIPE_ARCH_PPC_64
   /* Set the NJ bit in VSCR to 0 so denormalized values are handled as
    * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This guarantees
    * that some rounding and half-float to float handling does not round
    * incorrectly to 0.
    * XXX: should eventually follow same logic on all platforms.
    * Right now denorms get explicitly disabled (but elsewhere) for x86,
    * whereas ppc64 explicitly enables them...
    */
   if (util_cpu_caps.has_altivec) {
      /* Mask with a single zero bit: VSCR &= mask clears just that bit
       * (NOTE(review): per the comment above this is the NJ bit; the "r"
       * constraint on a vector operand is unusual -- confirm on ppc64). */
      unsigned short mask[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
                                0xFFFF, 0xFFFF, 0xFFFE, 0xFFFF };
      __asm (
        "mfvscr %%v1\n"
        "vand %0,%%v1,%0\n"
        "mtvscr %0"
        :
        : "r" (*mask)
      );
   }
#endif

   gallivm_initialized = TRUE;

#if 0
   /* For simulating less capable machines */
   util_cpu_caps.has_sse3 = 0;
   util_cpu_caps.has_ssse3 = 0;
   util_cpu_caps.has_sse4_1 = 0;
   util_cpu_caps.has_avx = 0;
   util_cpu_caps.has_f16c = 0;
#endif
}
471
472
473
474 /**
475 * Create a new gallivm_state object.
476 * Note that we return a singleton.
477 */
478 struct gallivm_state *
479 gallivm_create(void)
480 {
481 struct gallivm_state *gallivm;
482
483 gallivm = CALLOC_STRUCT(gallivm_state);
484 if (gallivm) {
485 if (!init_gallivm_state(gallivm)) {
486 FREE(gallivm);
487 gallivm = NULL;
488 }
489 }
490
491 return gallivm;
492 }
493
494
/**
 * Destroy a gallivm_state object: releases the LLVM IR objects, the
 * generated machine code, and the state struct itself.
 *
 * Safe to call with NULL (no-op), matching free()-style semantics;
 * previously a NULL argument would crash in gallivm_free_ir().
 */
void
gallivm_destroy(struct gallivm_state *gallivm)
{
   if (!gallivm)
      return;

   gallivm_free_ir(gallivm);
   gallivm_free_code(gallivm);
   FREE(gallivm);
}
505
506
/**
 * Validate a function.
 *
 * In debug builds, runs the LLVM IR verifier and asserts on failure
 * (after dumping the offending function).  In all builds, dumps the IR
 * when GALLIVM_DEBUG=ir is set.
 */
void
gallivm_verify_function(struct gallivm_state *gallivm,
                        LLVMValueRef func)
{
   /* Verify the LLVM IR. If invalid, dump and abort */
#ifdef DEBUG
   if (LLVMVerifyFunction(func, LLVMPrintMessageAction)) {
      lp_debug_dump_value(func);
      assert(0);
      return;
   }
#endif

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      /* Print the LLVM IR to stderr */
      lp_debug_dump_value(func);
      debug_printf("\n");
   }
}
529
530
/**
 * Run the optimization passes over every function in the module and
 * (with MC-JIT) create the execution engine, making the module ready
 * for gallivm_jit_function().  Must be called exactly once, after all
 * IR has been emitted.
 */
void
gallivm_compile_module(struct gallivm_state *gallivm)
{
   LLVMValueRef func;

   assert(!gallivm->compiled);

   /* No more IR will be emitted, so the builder is no longer needed. */
   if (gallivm->builder) {
      LLVMDisposeBuilder(gallivm->builder);
      gallivm->builder = NULL;
   }

   /* Run optimization passes */
   LLVMInitializeFunctionPassManager(gallivm->passmgr);
   func = LLVMGetFirstFunction(gallivm->module);
   while (func) {
      if (0) {
         debug_printf("optimizing %s...\n", LLVMGetValueName(func));
      }
      LLVMRunFunctionPassManager(gallivm->passmgr, func);
      func = LLVMGetNextFunction(func);
   }
   LLVMFinalizeFunctionPassManager(gallivm->passmgr);

   /* Dump byte code to a file (debug aid, normally compiled out) */
   if (0) {
      LLVMWriteBitcodeToFile(gallivm->module, "llvmpipe.bc");
      debug_printf("llvmpipe.bc written\n");
      debug_printf("Invoke as \"llc -o - llvmpipe.bc\"\n");
   }

#if USE_MCJIT
   /* MC-JIT compiles the whole module on engine creation, which is why
    * engine creation was deferred until the module is complete. */
   assert(!gallivm->engine);
   if (!init_gallivm_engine(gallivm)) {
      assert(0);
   }
#endif
   assert(gallivm->engine);

   ++gallivm->compiled;
}
572
573
574
/**
 * Return a callable pointer to the JIT-compiled machine code for \p func.
 *
 * gallivm_compile_module() must have been called first.  Also emits a
 * disassembly dump (GALLIVM_DEBUG=asm) and profiling info (PROFILE builds).
 */
func_pointer
gallivm_jit_function(struct gallivm_state *gallivm,
                     LLVMValueRef func)
{
   void *code;
   func_pointer jit_func;

   assert(gallivm->compiled);
   assert(gallivm->engine);

   code = LLVMGetPointerToGlobal(gallivm->engine, func);
   assert(code);
   /* Converting a data pointer to a function pointer isn't portable C,
    * hence the pointer_to_func() helper. */
   jit_func = pointer_to_func(code);

   if (gallivm_debug & GALLIVM_DEBUG_ASM) {
      lp_disassemble(func, code);
   }

#if defined(PROFILE)
   lp_profile(func, code);
#endif

   return jit_func;
}