static LLVMTargetMachineRef ac_create_target_machine(enum radeon_family family,
enum ac_target_machine_options tm_options,
+ LLVMCodeGenOptLevel level,
const char **out_triple)
{
assert(family >= CHIP_TAHITI);
triple,
ac_get_llvm_processor_name(family),
features,
- LLVMCodeGenLevelDefault,
+ level,
LLVMRelocDefault,
LLVMCodeModelDefault);
const char *triple;
memset(compiler, 0, sizeof(*compiler));
- compiler->tm = ac_create_target_machine(family,
- tm_options, &triple);
+ compiler->tm = ac_create_target_machine(family, tm_options,
+ LLVMCodeGenLevelDefault,
+ &triple);
if (!compiler->tm)
return false;
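+ /* Optionally create a second target machine that compiles at a lower
+ * optimization level, for faster compilation of very large shaders. */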
+ if (tm_options & AC_TM_CREATE_LOW_OPT) {
+ compiler->low_opt_tm =
+ ac_create_target_machine(family, tm_options,
+ LLVMCodeGenLevelLess, NULL);
+ if (!compiler->low_opt_tm)
+ goto fail;
+ }
+
if (okay_to_leak_target_library_info || (HAVE_LLVM >= 0x0700)) {
compiler->target_library_info =
ac_create_target_library_info(triple);
if (compiler->target_library_info)
ac_dispose_target_library_info(compiler->target_library_info);
#endif
+ if (compiler->low_opt_tm)
+ LLVMDisposeTargetMachine(compiler->low_opt_tm);
if (compiler->tm)
LLVMDisposeTargetMachine(compiler->tm);
}
AC_TM_PROMOTE_ALLOCA_TO_SCRATCH = (1 << 4),
AC_TM_CHECK_IR = (1 << 5),
AC_TM_ENABLE_GLOBAL_ISEL = (1 << 6),
+ AC_TM_CREATE_LOW_OPT = (1 << 7), /* also create a -O1 target machine */
};
enum ac_float_mode {
/* Per-thread persistent LLVM objects. */
struct ac_llvm_compiler {
- LLVMTargetMachineRef tm;
LLVMTargetLibraryInfoRef target_library_info;
LLVMPassManagerRef passmgr;
+
+ /* Default compiler. */
+ LLVMTargetMachineRef tm;
struct ac_compiler_passes *passes;
+
+ /* Optional compiler for faster compilation with fewer optimizations.
+ * LLVM modules can be created with either target machine; the choice
+ * only affects code generation, not module creation.
+ */
+ LLVMTargetMachineRef low_opt_tm; /* uses -O1 instead of -O2 */
+ struct ac_compiler_passes *low_opt_passes; /* pass manager for low_opt_tm */
};
const char *ac_get_llvm_processor_name(enum radeon_family family);
static void si_init_compiler(struct si_screen *sscreen,
struct ac_llvm_compiler *compiler)
{
+ /* Only create the less-optimizing version of the compiler on APUs
+ * predating Ryzen (Raven), which have comparatively slow CPUs. */
+ bool create_low_opt_compiler = !sscreen->info.has_dedicated_vram &&
+ sscreen->info.chip_class <= VI;
+
enum ac_target_machine_options tm_options =
(sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
(sscreen->debug_flags & DBG(GISEL) ? AC_TM_ENABLE_GLOBAL_ISEL : 0) |
(sscreen->info.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
(sscreen->info.chip_class < GFX9 ? AC_TM_FORCE_DISABLE_XNACK : 0) |
(!sscreen->llvm_has_working_vgpr_indexing ? AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0) |
- (sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0);
+ (sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0) |
+ (create_low_opt_compiler ? AC_TM_CREATE_LOW_OPT : 0);
ac_init_llvm_once();
ac_init_llvm_compiler(compiler, true, sscreen->info.family, tm_options);
compiler->passes = ac_create_llvm_passes(compiler->tm);
+
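+ /* Also create the pass manager for the -O1 target machine, if it
+ * was requested. */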
+ if (compiler->low_opt_tm)
+ compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}
static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
ac_destroy_llvm_passes(compiler->passes);
+ ac_destroy_llvm_passes(compiler->low_opt_passes);
ac_destroy_llvm_compiler(compiler);
}
LLVMModuleRef mod,
struct pipe_debug_callback *debug,
unsigned processor,
- const char *name)
+ const char *name,
+ bool less_optimized)
{
int r = 0;
unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
}
if (!si_replace_shader(count, binary)) {
- r = si_llvm_compile(mod, binary, compiler, debug);
+ r = si_llvm_compile(mod, binary, compiler, debug,
+ less_optimized);
if (r)
return r;
}
&ctx.shader->config, ctx.compiler,
ctx.ac.module,
debug, PIPE_SHADER_GEOMETRY,
- "GS Copy Shader");
+ "GS Copy Shader", false);
if (!r) {
if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
fprintf(stderr, "GS Copy Shader:\n");
LLVMBuildRetVoid(builder);
}
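+/* Return true if the shader should be compiled with the less-optimizing
+ * (-O1) compiler to reduce compile time. */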
+static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
+ struct si_shader_selector *sel)
+{
+ if (!compiler->low_opt_passes)
+ return false;
+
+ /* Assume a slow CPU. */
+ assert(!sel->screen->info.has_dedicated_vram &&
+ sel->screen->info.chip_class <= VI);
+
+ /* Use the less-optimizing compiler for huge compute shaders: e.g.
+ * one dEQP test contains 2597 memory opcodes, mostly buffer stores,
+ * and takes very long to compile with full optimizations. */
+ return sel->type == PIPE_SHADER_COMPUTE &&
+ sel->info.num_memory_instructions > 1000;
+}
+
int si_compile_tgsi_shader(struct si_screen *sscreen,
struct ac_llvm_compiler *compiler,
struct si_shader *shader,
/* Compile to bytecode. */
r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
- ctx.ac.module, debug, ctx.type, "TGSI shader");
+ ctx.ac.module, debug, ctx.type, "TGSI shader",
+ si_should_optimize_less(compiler, shader->selector));
si_llvm_dispose(&ctx);
if (r) {
fprintf(stderr, "LLVM failed to compile shader\n");
si_llvm_optimize_module(&ctx);
if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
- ctx.ac.module, debug, ctx.type, name)) {
+ ctx.ac.module, debug, ctx.type, name, false)) {
FREE(result);
result = NULL;
goto out;
unsigned si_llvm_compile(LLVMModuleRef M, struct ac_shader_binary *binary,
struct ac_llvm_compiler *compiler,
- struct pipe_debug_callback *debug);
+ struct pipe_debug_callback *debug,
+ bool less_optimized);
LLVMTypeRef tgsi2llvmtype(struct lp_build_tgsi_context *bld_base,
enum tgsi_opcode_type type);
*/
unsigned si_llvm_compile(LLVMModuleRef M, struct ac_shader_binary *binary,
struct ac_llvm_compiler *compiler,
- struct pipe_debug_callback *debug)
+ struct pipe_debug_callback *debug,
+ bool less_optimized)
{
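+ /* Use the -O1 pass manager when requested and available; otherwise
+ * fall back to the default -O2 passes. */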
+ struct ac_compiler_passes *passes =
+ less_optimized && compiler->low_opt_passes ?
+ compiler->low_opt_passes : compiler->passes;
struct si_llvm_diagnostics diag;
LLVMContextRef llvm_ctx;
LLVMContextSetDiagnosticHandler(llvm_ctx, si_diagnostic_handler, &diag);
/* Compile IR. */
- if (!ac_compile_module_to_binary(compiler->passes, M, binary))
+ if (!ac_compile_module_to_binary(passes, M, binary))
diag.retval = 1;
if (diag.retval != 0)