gallivm: Remove lp_func_delete_body.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_init.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "pipe/p_config.h"
#include "pipe/p_compiler.h"
#include "util/u_cpu_detect.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "lp_bld.h"
#include "lp_bld_debug.h"
#include "lp_bld_misc.h"
#include "lp_bld_init.h"

#include <llvm-c/Analysis.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/BitWriter.h>

/**
 * AVX is supported in:
 * - standard JIT from LLVM 3.2 onwards
 * - MC-JIT from LLVM 3.1
 *   - MC-JIT supports limited OSes (MacOSX and Linux)
 * - standard JIT in LLVM 3.1, with backports
 */
#if defined(PIPE_ARCH_PPC_64) || defined(PIPE_ARCH_S390) || defined(PIPE_ARCH_ARM) || defined(PIPE_ARCH_AARCH64)
#  define USE_MCJIT 1
#  define HAVE_AVX 0
#elif HAVE_LLVM >= 0x0302 || (HAVE_LLVM == 0x0301 && defined(HAVE_JIT_AVX_SUPPORT))
#  define USE_MCJIT 0
#  define HAVE_AVX 1
#elif HAVE_LLVM == 0x0301 && (defined(PIPE_OS_LINUX) || defined(PIPE_OS_APPLE))
#  define USE_MCJIT 1
#  define HAVE_AVX 1
#else
#  define USE_MCJIT 0
#  define HAVE_AVX 0
#endif


#if USE_MCJIT
void LLVMLinkInMCJIT();
#endif

/*
 * LLVM has several global caches holding pointers to (or derived from)
 * objects owned by a context, so freeing contexts causes memory leaks and
 * false cache hits when these objects are destroyed.
 *
 * TODO: For thread safety on multi-threaded OpenGL we should use one LLVM
 * context per thread, and put them in a pool when threads are destroyed.
 */
#define USE_GLOBAL_CONTEXT 1


#ifdef DEBUG
unsigned gallivm_debug = 0;

static const struct debug_named_value lp_bld_debug_flags[] = {
   { "tgsi",   GALLIVM_DEBUG_TGSI, NULL },
   { "ir",     GALLIVM_DEBUG_IR, NULL },
   { "asm",    GALLIVM_DEBUG_ASM, NULL },
   { "nopt",   GALLIVM_DEBUG_NO_OPT, NULL },
   { "perf",   GALLIVM_DEBUG_PERF, NULL },
   { "no_brilinear", GALLIVM_DEBUG_NO_BRILINEAR, NULL },
   { "no_rho_approx", GALLIVM_DEBUG_NO_RHO_APPROX, NULL },
   { "no_quad_lod", GALLIVM_DEBUG_NO_QUAD_LOD, NULL },
   { "gc",     GALLIVM_DEBUG_GC, NULL },
   DEBUG_NAMED_VALUE_END
};

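/* Flags are comma-separated, e.g. GALLIVM_DEBUG=tgsi,asm dumps both the
 * incoming TGSI shaders and the generated machine code.
 */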
DEBUG_GET_ONCE_FLAGS_OPTION(gallivm_debug, "GALLIVM_DEBUG", lp_bld_debug_flags, 0)
#endif


static boolean gallivm_initialized = FALSE;

unsigned lp_native_vector_width;


/*
 * Optimization values are:
 * - 0: None (-O0)
 * - 1: Less (-O1)
 * - 2: Default (-O2, -Os)
 * - 3: Aggressive (-O3)
 *
 * See also CodeGenOpt::Level in llvm/Target/TargetMachine.h
 */
enum LLVM_CodeGenOpt_Level {
   None,        // -O0
   Less,        // -O1
   Default,     // -O2, -Os
   Aggressive   // -O3
};
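/* The level chosen in init_gallivm_engine() below is handed to
 * lp_build_create_jit_compiler_for_module() as a plain unsigned, so these
 * values must stay in sync with LLVM's CodeGenOpt::Level.
 */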


/**
 * Create the LLVM (optimization) pass manager and install
 * relevant optimization passes.
 * \return TRUE for success, FALSE for failure
 */
static boolean
create_pass_manager(struct gallivm_state *gallivm)
{
   assert(!gallivm->passmgr);
   assert(gallivm->target);

   gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(gallivm->module);
   if (!gallivm->passmgr)
      return FALSE;

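   /* Register the target's data layout with the pass manager, so the passes
    * below see the correct type sizes and alignments.
    */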
   LLVMAddTargetData(gallivm->target, gallivm->passmgr);

   if ((gallivm_debug & GALLIVM_DEBUG_NO_OPT) == 0) {
      /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
       * but there are more on SVN.
       * TODO: Add more passes.
       */
      LLVMAddScalarReplAggregatesPass(gallivm->passmgr);
      LLVMAddLICMPass(gallivm->passmgr);
      LLVMAddCFGSimplificationPass(gallivm->passmgr);
      LLVMAddReassociatePass(gallivm->passmgr);

      if (sizeof(void*) == 4) {
         /* XXX: For LLVM >= 2.7 on 32-bit builds, use this order of passes to
          * avoid generating bad code.
          * Test with the piglit glsl-vs-sqrt-zero test.
          */
         LLVMAddConstantPropagationPass(gallivm->passmgr);
         LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
      }
      else {
         LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
         LLVMAddConstantPropagationPass(gallivm->passmgr);
      }

      if (util_cpu_caps.has_sse4_1) {
         /* FIXME: There is a bug in this pass, whereby the combination
          * of fptosi and sitofp (necessary for trunc/floor/ceil/round
          * implementation) somehow becomes invalid code.
          */
         LLVMAddInstructionCombiningPass(gallivm->passmgr);
      }
      LLVMAddGVNPass(gallivm->passmgr);
   }
   else {
      /* We need at least this pass to prevent the backends from failing in
       * unexpected ways.
       */
      LLVMAddPromoteMemoryToRegisterPass(gallivm->passmgr);
   }

   return TRUE;
}


/**
 * Free gallivm object's LLVM allocations, but not any generated code
 * nor the gallivm object itself.
 */
void
gallivm_free_ir(struct gallivm_state *gallivm)
{
   if (gallivm->passmgr) {
      LLVMDisposePassManager(gallivm->passmgr);
   }

   if (gallivm->engine) {
      /* This will already destroy any associated module */
      LLVMDisposeExecutionEngine(gallivm->engine);
   } else if (gallivm->module) {
      LLVMDisposeModule(gallivm->module);
   }

#if !USE_MCJIT
   /* Don't free the TargetData, it's owned by the exec engine */
#else
   if (gallivm->target) {
      LLVMDisposeTargetData(gallivm->target);
   }
#endif

   if (gallivm->builder)
      LLVMDisposeBuilder(gallivm->builder);

   if (!USE_GLOBAL_CONTEXT && gallivm->context)
      LLVMContextDispose(gallivm->context);

   gallivm->engine = NULL;
   gallivm->target = NULL;
   gallivm->module = NULL;
   gallivm->passmgr = NULL;
   gallivm->context = NULL;
   gallivm->builder = NULL;
}


/**
 * Free LLVM-generated code. Should be done AFTER gallivm_free_ir().
 */
static void
gallivm_free_code(struct gallivm_state *gallivm)
{
   assert(!gallivm->module);
   assert(!gallivm->engine);
   lp_free_generated_code(gallivm->code);
   gallivm->code = NULL;
}

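/**
 * Create the JIT execution engine for gallivm->module.
 * With the legacy JIT this is also where gallivm->target is obtained.
 */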
static boolean
init_gallivm_engine(struct gallivm_state *gallivm)
{
   if (1) {
      enum LLVM_CodeGenOpt_Level optlevel;
      char *error = NULL;
      int ret;

      if (gallivm_debug & GALLIVM_DEBUG_NO_OPT) {
         optlevel = None;
      }
      else {
         optlevel = Default;
      }

      ret = lp_build_create_jit_compiler_for_module(&gallivm->engine,
                                                    &gallivm->code,
                                                    gallivm->module,
                                                    (unsigned) optlevel,
                                                    USE_MCJIT,
                                                    &error);
      if (ret) {
         _debug_printf("%s\n", error);
         LLVMDisposeMessage(error);
         goto fail;
      }
   }

#if !USE_MCJIT
   gallivm->target = LLVMGetExecutionEngineTargetData(gallivm->engine);
   if (!gallivm->target)
      goto fail;
#else
   if (0) {
      /*
       * Dump the data layout strings.
       */

      LLVMTargetDataRef target = LLVMGetExecutionEngineTargetData(gallivm->engine);
      char *data_layout;
      char *engine_data_layout;

      data_layout = LLVMCopyStringRepOfTargetData(gallivm->target);
      engine_data_layout = LLVMCopyStringRepOfTargetData(target);

      if (1) {
         debug_printf("module target data = %s\n", data_layout);
         debug_printf("engine target data = %s\n", engine_data_layout);
      }

      free(data_layout);
      free(engine_data_layout);
   }
#endif

   return TRUE;

fail:
   return FALSE;
}


/**
 * Allocate gallivm LLVM objects.
 * \return TRUE for success, FALSE for failure
 */
static boolean
init_gallivm_state(struct gallivm_state *gallivm)
{
   assert(!gallivm->context);
   assert(!gallivm->module);

   lp_build_init();

   if (USE_GLOBAL_CONTEXT) {
      gallivm->context = LLVMGetGlobalContext();
   } else {
      gallivm->context = LLVMContextCreate();
   }
   if (!gallivm->context)
      goto fail;

   gallivm->module = LLVMModuleCreateWithNameInContext("gallivm",
                                                       gallivm->context);
   if (!gallivm->module)
      goto fail;

   gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
   if (!gallivm->builder)
      goto fail;

   /* FIXME: MC-JIT only allows compiling one module at a time, and it must be
    * complete when MC-JIT is created. So defer the MC-JIT engine creation for
    * now.
    */
#if !USE_MCJIT
   if (!init_gallivm_engine(gallivm)) {
      goto fail;
   }
#else
   /*
    * MC-JIT engine compiles the module immediately on creation, so we can't
    * obtain the target data from it. Instead we create a target data layout
    * from a string.
    *
    * The produced layout strings are not precisely the same, but should make
    * no difference for the kind of optimization passes we run.
    *
    * For reference this is the layout string on x64:
    *
    *   e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64
    *
    * See also:
    * - http://llvm.org/docs/LangRef.html#datalayout
    */

   {
      const unsigned pointer_size = 8 * sizeof(void *);
      char layout[512];
      util_snprintf(layout, sizeof layout, "%c-p:%u:%u:%u-i64:64:64-a0:0:%u-s0:%u:%u",
#ifdef PIPE_ARCH_LITTLE_ENDIAN
                    'e', // little endian
#else
                    'E', // big endian
#endif
                    pointer_size, pointer_size, pointer_size, // pointer size, abi alignment, preferred alignment
                    pointer_size, // aggregate preferred alignment
                    pointer_size, pointer_size); // stack objects abi alignment, preferred alignment
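      /* For example, on a 64-bit little-endian build the string built above
       * is "e-p:64:64:64-i64:64:64-a0:0:64-s0:64:64".
       */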

      gallivm->target = LLVMCreateTargetData(layout);
      if (!gallivm->target) {
         return FALSE;
      }
   }
#endif

   if (!create_pass_manager(gallivm))
      goto fail;

   return TRUE;

fail:
   gallivm_free_ir(gallivm);
   gallivm_free_code(gallivm);
   return FALSE;
}


void
lp_build_init(void)
{
   if (gallivm_initialized)
      return;

#ifdef DEBUG
   gallivm_debug = debug_get_option_gallivm_debug();
#endif

   lp_set_target_options();

#if USE_MCJIT
   LLVMLinkInMCJIT();
#else
   LLVMLinkInJIT();
#endif

   util_cpu_detect();

   /* On AMD Bulldozer, AVX throughput is the same as SSE2, and because 8-wide
    * vectors need more floating-point ops than 4-wide ones (due to padding),
    * it is actually more efficient to use 4-wide vectors on this processor.
    *
    * See also:
    * - http://www.anandtech.com/show/4955/the-bulldozer-review-amd-fx8150-tested/2
    */
   if (HAVE_AVX &&
       util_cpu_caps.has_avx &&
       util_cpu_caps.has_intel) {
      lp_native_vector_width = 256;
   } else {
      /* Leave it at 128, even when no SIMD extensions are available.
       * It really needs to be a multiple of 128 so it can hold 4 floats.
       */
      lp_native_vector_width = 128;
   }

   lp_native_vector_width = debug_get_num_option("LP_NATIVE_VECTOR_WIDTH",
                                                 lp_native_vector_width);

   if (lp_native_vector_width <= 128) {
      /* Hide AVX support, as LLVM AVX intrinsics are often guarded only by the
       * "util_cpu_caps.has_avx" predicate and lack the
       * "lp_native_vector_width > 128" one. This also gives more consistent
       * behavior, allowing one to test SSE2 code paths on AVX machines.
       * XXX: we should not play games with util_cpu_caps directly, as it might
       * get used for other things outside llvm too.
       */
      util_cpu_caps.has_avx = 0;
      util_cpu_caps.has_avx2 = 0;
   }

   if (!HAVE_AVX) {
      /*
       * Note these instructions are VEX-only, so we can only emit them when
       * using AVX (we don't want to base this on has_avx && has_f16c later,
       * as that would omit them unnecessarily on AMD CPUs, see above).
       */
      util_cpu_caps.has_f16c = 0;
      util_cpu_caps.has_xop = 0;
   }

#ifdef PIPE_ARCH_PPC_64
   /* Set the NJ bit in VSCR to 0 so denormalized values are handled as
    * specified by IEEE standard (PowerISA 2.06 - Section 6.3). This guarantees
    * that some rounding and half-float to float handling does not round
    * incorrectly to 0.
    * XXX: should eventually follow same logic on all platforms.
    * Right now denorms get explicitly disabled (but elsewhere) for x86,
    * whereas ppc64 explicitly enables them...
    */
   if (util_cpu_caps.has_altivec) {
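      /* The mask below is all ones except for the single bit holding VSCR[NJ],
       * so the mfvscr/vand/mtvscr sequence clears NJ and leaves the rest of
       * VSCR unchanged.
       */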
      unsigned short mask[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
                                0xFFFF, 0xFFFF, 0xFFFE, 0xFFFF };
      __asm (
        "mfvscr %%v1\n"
        "vand %0,%%v1,%0\n"
        "mtvscr %0"
        :
        : "r" (*mask)
      );
   }
#endif

   gallivm_initialized = TRUE;

#if 0
   /* For simulating less capable machines */
   util_cpu_caps.has_sse3 = 0;
   util_cpu_caps.has_ssse3 = 0;
   util_cpu_caps.has_sse4_1 = 0;
   util_cpu_caps.has_avx = 0;
   util_cpu_caps.has_f16c = 0;
#endif
}


/**
 * Create a new gallivm_state object.
 * Note that when USE_GLOBAL_CONTEXT is set, all gallivm_state objects share
 * the global LLVM context.
 */
struct gallivm_state *
gallivm_create(void)
{
   struct gallivm_state *gallivm;

   gallivm = CALLOC_STRUCT(gallivm_state);
   if (gallivm) {
      if (!init_gallivm_state(gallivm)) {
         FREE(gallivm);
         gallivm = NULL;
      }
   }

   return gallivm;
}


/**
 * Destroy a gallivm_state object.
 */
void
gallivm_destroy(struct gallivm_state *gallivm)
{
   gallivm_free_ir(gallivm);
   gallivm_free_code(gallivm);
   FREE(gallivm);
}


/**
 * Validate a function.
 */
void
gallivm_verify_function(struct gallivm_state *gallivm,
                        LLVMValueRef func)
{
   /* Verify the LLVM IR.  If invalid, dump and abort */
#ifdef DEBUG
   if (LLVMVerifyFunction(func, LLVMPrintMessageAction)) {
      lp_debug_dump_value(func);
      assert(0);
      return;
   }
#endif

   if (gallivm_debug & GALLIVM_DEBUG_IR) {
      /* Print the LLVM IR to stderr */
      lp_debug_dump_value(func);
      debug_printf("\n");
   }
}

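/**
 * Run the optimization passes over all functions in the module and, for
 * MC-JIT builds, create the execution engine.  The IR builder is disposed
 * here, so all functions must have been emitted before calling this.
 */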
void
gallivm_compile_module(struct gallivm_state *gallivm)
{
   LLVMValueRef func;

   assert(!gallivm->compiled);

   if (gallivm->builder) {
      LLVMDisposeBuilder(gallivm->builder);
      gallivm->builder = NULL;
   }

   /* Run optimization passes */
   LLVMInitializeFunctionPassManager(gallivm->passmgr);
   func = LLVMGetFirstFunction(gallivm->module);
   while (func) {
      if (0) {
         debug_printf("optimizing %s...\n", LLVMGetValueName(func));
      }
      LLVMRunFunctionPassManager(gallivm->passmgr, func);
      func = LLVMGetNextFunction(func);
   }
   LLVMFinalizeFunctionPassManager(gallivm->passmgr);

   /* Dump byte code to a file */
   if (0) {
      LLVMWriteBitcodeToFile(gallivm->module, "llvmpipe.bc");
      debug_printf("llvmpipe.bc written\n");
      debug_printf("Invoke as \"llc -o - llvmpipe.bc\"\n");
   }

#if USE_MCJIT
   assert(!gallivm->engine);
   if (!init_gallivm_engine(gallivm)) {
      assert(0);
   }
#endif
   assert(gallivm->engine);

   ++gallivm->compiled;
}


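/**
 * Return a pointer to the JIT-compiled machine code for the given function.
 * gallivm_compile_module() must have been called beforehand.
 */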
func_pointer
gallivm_jit_function(struct gallivm_state *gallivm,
                     LLVMValueRef func)
{
   void *code;
   func_pointer jit_func;

   assert(gallivm->compiled);
   assert(gallivm->engine);

   code = LLVMGetPointerToGlobal(gallivm->engine, func);
   assert(code);
   jit_func = pointer_to_func(code);

   if (gallivm_debug & GALLIVM_DEBUG_ASM) {
      lp_disassemble(func, code);
   }

#if defined(PROFILE)
   lp_profile(func, code);
#endif

   return jit_func;
}
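

/*
 * Typical call sequence (a sketch; build_my_shader() and my_shader_fn are
 * hypothetical caller-side names, not part of gallivm):
 *
 *    struct gallivm_state *gallivm = gallivm_create();
 *    LLVMValueRef func = build_my_shader(gallivm);   // emit IR with gallivm->builder
 *    gallivm_verify_function(gallivm, func);
 *    gallivm_compile_module(gallivm);
 *    my_shader_fn fn = (my_shader_fn) gallivm_jit_function(gallivm, func);
 *    ...
 *    gallivm_destroy(gallivm);
 */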