gallivm: Stop using module providers.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_init.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "pipe/p_config.h"
30 #include "pipe/p_compiler.h"
31 #include "util/u_cpu_detect.h"
32 #include "util/u_debug.h"
33 #include "util/u_memory.h"
34 #include "util/u_simple_list.h"
35 #include "lp_bld.h"
36 #include "lp_bld_debug.h"
37 #include "lp_bld_misc.h"
38 #include "lp_bld_init.h"
39
40 #include <llvm-c/Analysis.h>
41 #include <llvm-c/Transforms/Scalar.h>
42 #include <llvm-c/BitWriter.h>
43
44
/**
 * Decide between the old JIT and MC-JIT, and whether AVX can be used.
 *
 * AVX is supported in:
 * - standard JIT from LLVM 3.2 onwards
 * - MC-JIT from LLVM 3.1
 * - MC-JIT supports limited OSes (MacOSX and Linux)
 * - standard JIT in LLVM 3.1, with backports
 */
#if defined(PIPE_ARCH_PPC_64) || defined(PIPE_ARCH_S390) || defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
/* Non-x86 architectures: the old JIT has no backend for them, so MC-JIT is
 * mandatory; AVX is an x86 feature, hence unavailable.
 */
#  define USE_MCJIT 1
#  define HAVE_AVX 0
#elif HAVE_LLVM >= 0x0302 || (HAVE_LLVM == 0x0301 && defined(HAVE_JIT_AVX_SUPPORT))
#  define USE_MCJIT 0
#  define HAVE_AVX 1
#elif HAVE_LLVM == 0x0301 && (defined(PIPE_OS_LINUX) || defined(PIPE_OS_APPLE))
#  define USE_MCJIT 1
#  define HAVE_AVX 1
#else
#  define USE_MCJIT 0
#  define HAVE_AVX 0
#endif


#if USE_MCJIT
/* Not exposed by the llvm-c headers of these LLVM versions, so declare it
 * manually; pulls the MC-JIT implementation into the link.
 */
void LLVMLinkInMCJIT();
#endif
70
71
#ifdef DEBUG
/* Bitmask of GALLIVM_DEBUG_* flags, parsed once from the GALLIVM_DEBUG
 * environment variable via the debug_get_option helper below.
 */
unsigned gallivm_debug = 0;

/* Mapping from GALLIVM_DEBUG option names to their flag bits. */
static const struct debug_named_value lp_bld_debug_flags[] = {
   { "tgsi",   GALLIVM_DEBUG_TGSI, NULL },
   { "ir",     GALLIVM_DEBUG_IR, NULL },
   { "asm",    GALLIVM_DEBUG_ASM, NULL },
   { "nopt",   GALLIVM_DEBUG_NO_OPT, NULL },
   { "perf",   GALLIVM_DEBUG_PERF, NULL },
   { "no_brilinear", GALLIVM_DEBUG_NO_BRILINEAR, NULL },
   { "no_rho_approx", GALLIVM_DEBUG_NO_RHO_APPROX, NULL },
   { "no_quad_lod", GALLIVM_DEBUG_NO_QUAD_LOD, NULL },
   { "gc",     GALLIVM_DEBUG_GC, NULL },
   DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(gallivm_debug, "GALLIVM_DEBUG", lp_bld_debug_flags, 0)
#endif
90
91
/* Guards the one-time global initialization done in lp_build_init(). */
static boolean gallivm_initialized = FALSE;

/* Native SIMD vector width in bits (128 or 256); set by lp_build_init(). */
unsigned lp_native_vector_width;
95
96
/*
 * Codegen optimization level passed to the JIT compiler.
 *
 * Optimization values are:
 * - 0: None (-O0)
 * - 1: Less (-O1)
 * - 2: Default (-O2, -Os)
 * - 3: Aggressive (-O3)
 *
 * Mirrors CodeGenOpt::Level in llvm/Target/TargetMachine.h, which has no
 * C-API equivalent in these LLVM versions, hence the local copy.
 */
enum LLVM_CodeGenOpt_Level {
   None,        // -O0
   Less,        // -O1
   Default,     // -O2, -Os
   Aggressive   // -O3
};
112
113
114 /**
115 * Create the LLVM (optimization) pass manager and install
116 * relevant optimization passes.
117 * \return TRUE for success, FALSE for failure
118 */
119 static boolean
120 create_pass_manager(struct gallivm_state *gallivm)
121 {
122 assert(!gallivm->passmgr);
123 assert(gallivm->target);
124
125 gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(gallivm->module);
126 if (!gallivm->passmgr)
127 return FALSE;
128
129 LLVMAddTargetData(gallivm->target, gallivm->passmgr);
130
131 if ((gallivm_debug & GALLIVM_DEBUG_NO_OPT) == 0) {
132 /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
133 * but there are more on SVN.
134 * TODO: Add more passes.
135 */
136 LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
137 LLVMAddLICMPass(gallivm->passmgr);
138 LLVMAddCFGSimplificationPass(gallivm->passmgr);
139 LLVMAddReassociatePass(gallivm->passmgr);
140
141 if (sizeof(void*) == 4) {
142 /* XXX: For LLVM >= 2.7 and 32-bit build, use this order of passes to
143 * avoid generating bad code.
144 * Test with piglit glsl-vs-sqrt-zero test.
145 */
146 LLVMAddConstantPropagationPass(gallivm->passmgr);
147 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
148 }
149 else {
150 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
151 LLVMAddConstantPropagationPass(gallivm->passmgr);
152 }
153
154 if (util_cpu_caps.has_sse4_1) {
155 /* FIXME: There is a bug in this pass, whereby the combination
156 * of fptosi and sitofp (necessary for trunc/floor/ceil/round
157 * implementation) somehow becomes invalid code.
158 */
159 LLVMAddInstructionCombiningPass(gallivm->passmgr);
160 }
161 LLVMAddGVNPass(gallivm->passmgr);
162 }
163 else {
164 /* We need at least this pass to prevent the backends to fail in
165 * unexpected ways.
166 */
167 LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
168 }
169
170 return TRUE;
171 }
172
173
/**
 * Free gallivm object's LLVM allocations, but not the gallivm object itself.
 *
 * Safe to call on a partially-initialized state (all fields NULL-checked),
 * which is why init_gallivm_state() uses it on its failure path.
 */
static void
free_gallivm_state(struct gallivm_state *gallivm)
{
   if (gallivm->passmgr) {
      LLVMDisposePassManager(gallivm->passmgr);
   }

   if (gallivm->engine) {
      /* This will already destroy any associated module */
      LLVMDisposeExecutionEngine(gallivm->engine);
   } else if (gallivm->module) {
      /* No engine took ownership of the module, so dispose it directly. */
      LLVMDisposeModule(gallivm->module);
   }

#if !USE_MCJIT
   /* Don't free the TargetData, it's owned by the exec engine */
#else
   /* With MC-JIT the target data was created separately from a layout
    * string (see init_gallivm_state), so we own it and must free it.
    */
   if (gallivm->target) {
      LLVMDisposeTargetData(gallivm->target);
   }
#endif

   /* Never free the LLVM context.
    */
#if 0
   if (gallivm->context)
      LLVMContextDispose(gallivm->context);
#endif

   if (gallivm->builder)
      LLVMDisposeBuilder(gallivm->builder);

   /* Clear everything so a stale state cannot be double-freed. */
   gallivm->engine = NULL;
   gallivm->target = NULL;
   gallivm->module = NULL;
   gallivm->passmgr = NULL;
   gallivm->context = NULL;
   gallivm->builder = NULL;
}
216
217
/**
 * Create the JIT execution engine for gallivm's module, and (old JIT only)
 * fetch the engine-owned target data into gallivm->target.
 *
 * \return TRUE for success, FALSE for failure
 */
static boolean
init_gallivm_engine(struct gallivm_state *gallivm)
{
   if (1) {
      enum LLVM_CodeGenOpt_Level optlevel;
      char *error = NULL;
      int ret;

      /* GALLIVM_DEBUG=nopt disables codegen optimization too. */
      if (gallivm_debug & GALLIVM_DEBUG_NO_OPT) {
         optlevel = None;
      }
      else {
         optlevel = Default;
      }

      ret = lp_build_create_jit_compiler_for_module(&gallivm->engine,
                                                    gallivm->module,
                                                    (unsigned) optlevel,
                                                    USE_MCJIT,
                                                    &error);
      if (ret) {
         _debug_printf("%s\n", error);
         LLVMDisposeMessage(error);
         goto fail;
      }
   }

#if !USE_MCJIT
   /* The old JIT owns the target data; we merely borrow a reference here
    * (see the matching "don't free" note in free_gallivm_state).
    */
   gallivm->target = LLVMGetExecutionEngineTargetData(gallivm->engine);
   if (!gallivm->target)
      goto fail;
#else
   if (0) {
      /*
       * Dump the data layout strings: our hand-built one (gallivm->target)
       * vs. the one the engine actually uses — useful when debugging layout
       * mismatches.
       */

      LLVMTargetDataRef target = LLVMGetExecutionEngineTargetData(gallivm->engine);
      char *data_layout;
      char *engine_data_layout;

      data_layout = LLVMCopyStringRepOfTargetData(gallivm->target);
      engine_data_layout = LLVMCopyStringRepOfTargetData(target);

      if (1) {
         debug_printf("module target data = %s\n", data_layout);
         debug_printf("engine target data = %s\n", engine_data_layout);
      }

      free(data_layout);
      free(engine_data_layout);
   }
#endif

   return TRUE;

fail:
   return FALSE;
}
277
278
/**
 * Singleton LLVM context shared by every gallivm_state.
 *
 * We must never free LLVM contexts, because LLVM has several global caches
 * which pointing/derived from objects owned by the context, causing false
 * memory leaks and false cache hits when these objects are destroyed.
 *
 * TODO: For thread safety on multi-threaded OpenGL we should use one LLVM
 * context per thread, and put them in a pool when threads are destroyed.
 */
static LLVMContextRef gallivm_context = NULL;
290
291
292 /**
293 * Allocate gallivm LLVM objects.
294 * \return TRUE for success, FALSE for failure
295 */
296 static boolean
297 init_gallivm_state(struct gallivm_state *gallivm)
298 {
299 assert(!gallivm->context);
300 assert(!gallivm->module);
301
302 lp_build_init();
303
304 if (!gallivm_context) {
305 gallivm_context = LLVMContextCreate();
306 }
307 gallivm->context = gallivm_context;
308 if (!gallivm->context)
309 goto fail;
310
311 gallivm->module = LLVMModuleCreateWithNameInContext("gallivm",
312 gallivm->context);
313 if (!gallivm->module)
314 goto fail;
315
316 gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
317 if (!gallivm->builder)
318 goto fail;
319
320 /* FIXME: MC-JIT only allows compiling one module at a time, and it must be
321 * complete when MC-JIT is created. So defer the MC-JIT engine creation for
322 * now.
323 */
324 #if !USE_MCJIT
325 if (!init_gallivm_engine(gallivm)) {
326 goto fail;
327 }
328 #else
329 /*
330 * MC-JIT engine compiles the module immediately on creation, so we can't
331 * obtain the target data from it. Instead we create a target data layout
332 * from a string.
333 *
334 * The produced layout strings are not precisely the same, but should make
335 * no difference for the kind of optimization passes we run.
336 *
337 * For reference this is the layout string on x64:
338 *
339 * e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64
340 *
341 * See also:
342 * - http://llvm.org/docs/LangRef.html#datalayout
343 */
344
345 {
346 const unsigned pointer_size = 8 * sizeof(void *);
347 char layout[512];
348 util_snprintf(layout, sizeof layout, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
349 #ifdef PIPE_ARCH_LITTLE_ENDIAN
350 'e', // little endian
351 #else
352 'E', // big endian
353 #endif
354 pointer_size, pointer_size, pointer_size, // pointer size, abi alignment, preferred alignment
355 pointer_size, // aggregate preferred alignment
356 pointer_size, pointer_size); // stack objects abi alignment, preferred alignment
357
358 gallivm->target = LLVMCreateTargetData(layout);
359 if (!gallivm->target) {
360 return FALSE;
361 }
362 }
363 #endif
364
365 if (!create_pass_manager(gallivm))
366 goto fail;
367
368 return TRUE;
369
370 fail:
371 free_gallivm_state(gallivm);
372 return FALSE;
373 }
374
375
/**
 * One-time process-wide initialization: parse debug flags, set LLVM target
 * options, link in the JIT, detect CPU capabilities and pick the native
 * vector width.  Safe to call repeatedly; only the first call does work.
 */
void
lp_build_init(void)
{
   if (gallivm_initialized)
      return;

#ifdef DEBUG
   gallivm_debug = debug_get_option_gallivm_debug();
#endif

   lp_set_target_options();

#if USE_MCJIT
   LLVMLinkInMCJIT();
#else
   LLVMLinkInJIT();
#endif

   util_cpu_detect();

   /* AMD Bulldozer AVX's throughput is the same as SSE2; and because using
    * 8-wide vector needs more floating ops than 4-wide (due to padding), it is
    * actually more efficient to use 4-wide vectors on this processor.
    *
    * See also:
    * - http://www.anandtech.com/show/4955/the-bulldozer-review-amd-fx8150-tested/2
    */
   if (HAVE_AVX &&
       util_cpu_caps.has_avx &&
       util_cpu_caps.has_intel) {
      lp_native_vector_width = 256;
   } else {
      /* Leave it at 128, even when no SIMD extensions are available.
       * Really needs to be a multiple of 128 so can fit 4 floats.
       */
      lp_native_vector_width = 128;
   }

   /* Allow the environment to override the computed width for testing. */
   lp_native_vector_width = debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
                                                 lp_native_vector_width);

   if (lp_native_vector_width <= 128) {
      /* Hide AVX support, as often LLVM AVX intrinsics are only guarded by
       * "util_cpu_caps.has_avx" predicate, and lack the
       * "lp_native_vector_width > 128" predicate. And also to ensure a more
       * consistent behavior, allowing one to test SSE2 on AVX machines.
       * XXX: should not play games with util_cpu_caps directly as it might
       * get used for other things outside llvm too.
       */
      util_cpu_caps.has_avx = 0;
      util_cpu_caps.has_avx2 = 0;
   }

   if (!HAVE_AVX) {
      /*
       * note these instructions are VEX-only, so can only emit if we use
       * avx (don't want to base it on has_avx & has_f16c later as that would
       * omit it unnecessarily on amd cpus, see above).
       */
      util_cpu_caps.has_f16c = 0;
      util_cpu_caps.has_xop = 0;
   }

#ifdef PIPE_ARCH_PPC_64
   /* Set the NJ bit in VSCR to 0 so denormalized values are handled as
    * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This guarantees
    * that some rounding and half-float to float handling does not round
    * incorrectly to 0.
    * XXX: should eventually follow same logic on all platforms.
    * Right now denorms get explicitly disabled (but elsewhere) for x86,
    * whereas ppc64 explicitly enables them...
    */
   if (util_cpu_caps.has_altivec) {
      /* AND the VSCR with this mask: all-ones except the NJ bit. */
      unsigned short mask[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
                                0xFFFF, 0xFFFF, 0xFFFE, 0xFFFF };
      __asm (
        "mfvscr %%v1\n"
        "vand %0,%%v1,%0\n"
        "mtvscr %0"
        :
        : "r" (*mask)
      );
   }
#endif

   gallivm_initialized = TRUE;

#if 0
   /* For simulating less capable machines */
   util_cpu_caps.has_sse3 = 0;
   util_cpu_caps.has_ssse3 = 0;
   util_cpu_caps.has_sse4_1 = 0;
   util_cpu_caps.has_avx = 0;
   util_cpu_caps.has_f16c = 0;
#endif
}
472
473
474
475 /**
476 * Create a new gallivm_state object.
477 * Note that we return a singleton.
478 */
479 struct gallivm_state *
480 gallivm_create(void)
481 {
482 struct gallivm_state *gallivm;
483
484 gallivm = CALLOC_STRUCT(gallivm_state);
485 if (gallivm) {
486 if (!init_gallivm_state(gallivm)) {
487 FREE(gallivm);
488 gallivm = NULL;
489 }
490 }
491
492 return gallivm;
493 }
494
495
/**
 * Destroy a gallivm_state object, releasing its LLVM resources and the
 * object itself.
 *
 * NULL-safe, mirroring free(): calling with NULL is a no-op.  The original
 * would have crashed inside free_gallivm_state() on a NULL pointer, making
 * caller error paths needlessly fragile.
 */
void
gallivm_destroy(struct gallivm_state *gallivm)
{
   if (!gallivm)
      return;

   free_gallivm_state(gallivm);
   FREE(gallivm);
}
505
506
507 /**
508 * Validate and optimze a function.
509 */
510 static void
511 gallivm_optimize_function(struct gallivm_state *gallivm,
512 LLVMValueRef func)
513 {
514 if (0) {
515 debug_printf("optimizing %s...\n", LLVMGetValueName(func));
516 }
517
518 assert(gallivm->passmgr);
519
520 /* Apply optimizations to LLVM IR */
521 LLVMRunFunctionPassManager(gallivm->passmgr, func);
522
523 if (0) {
524 if (gallivm_debug & GALLIVM_DEBUG_IR) {
525 /* Print the LLVM IR to stderr */
526 lp_debug_dump_value(func);
527 debug_printf("\n");
528 }
529 }
530 }
531
532
/**
 * Validate a function, then run the optimization passes on it.
 *
 * In debug builds, malformed IR is dumped and aborts via assert; release
 * builds skip verification entirely.
 */
void
gallivm_verify_function(struct gallivm_state *gallivm,
                        LLVMValueRef func)
{
   /* Verify the LLVM IR. If invalid, dump and abort */
#ifdef DEBUG
   if (LLVMVerifyFunction(func, LLVMPrintMessageAction)) {
      lp_debug_dump_value(func);
      assert(0);
      return;
   }
#endif

   gallivm_optimize_function(gallivm, func);

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      /* Print the LLVM IR to stderr */
      lp_debug_dump_value(func);
      debug_printf("\n");
   }
}
557
558
559 void
560 gallivm_compile_module(struct gallivm_state *gallivm)
561 {
562 assert(!gallivm->compiled);
563
564 /* Dump byte code to a file */
565 if (0) {
566 LLVMWriteBitcodeToFile(gallivm->module, "llvmpipe.bc");
567 debug_printf("llvmpipe.bc written\n");
568 debug_printf("Invoke as \"llc -o - llvmpipe.bc\"\n");
569 }
570
571 #if USE_MCJIT
572 assert(!gallivm->engine);
573 if (!init_gallivm_engine(gallivm)) {
574 assert(0);
575 }
576 #endif
577 assert(gallivm->engine);
578
579 ++gallivm->compiled;
580 }
581
582
583
584 func_pointer
585 gallivm_jit_function(struct gallivm_state *gallivm,
586 LLVMValueRef func)
587 {
588 void *code;
589 func_pointer jit_func;
590
591 assert(gallivm->compiled);
592 assert(gallivm->engine);
593
594 code = LLVMGetPointerToGlobal(gallivm->engine, func);
595 assert(code);
596 jit_func = pointer_to_func(code);
597
598 if (gallivm_debug & GALLIVM_DEBUG_ASM) {
599 lp_disassemble(func, code);
600 }
601
602 #if defined(PROFILE)
603 lp_profile(func, code);
604 #endif
605
606 /* Free the function body to save memory */
607 lp_func_delete_body(func);
608
609 return jit_func;
610 }
611
612
/**
 * Free the function (and its machine code).
 *
 * \param code  machine code pointer previously obtained for \p func via
 *              gallivm_jit_function(), or NULL.
 */
void
gallivm_free_function(struct gallivm_state *gallivm,
                      LLVMValueRef func,
                      const void *code)
{
#if !USE_MCJIT
   if (code) {
      LLVMFreeMachineCodeForFunction(gallivm->engine, func);
   }

   LLVMDeleteFunction(func);
#endif
   /* With MC-JIT, functions cannot be freed individually; their memory is
    * reclaimed when the whole engine is destroyed in free_gallivm_state().
    */
}