{ "sbdry", DBG_SB_DRY_RUN, "Don't use optimized bytecode (just print the dumps)" },
{ "sbstat", DBG_SB_STAT, "Print optimization statistics for shaders" },
{ "sbdump", DBG_SB_DUMP, "Print IR dumps after some optimization passes" },
+ { "sbnofallback", DBG_SB_NO_FALLBACK, "Abort on errors instead of fallback" },
DEBUG_NAMED_VALUE_END /* must be last */
};
sb_context::dump_pass = df & DBG_SB_DUMP;
sb_context::dump_stat = df & DBG_SB_STAT;
sb_context::dry_run = df & DBG_SB_DRY_RUN;
+ sb_context::no_fallback = df & DBG_SB_NO_FALLBACK;
sb_context::dskip_start = debug_get_num_option("R600_SB_DSKIP_START", 0);
sb_context::dskip_end = debug_get_num_option("R600_SB_DSKIP_END", 0);
time_start = os_time_get_nano();
}
+ SB_DUMP_STAT( cerr << "\nsb: shader " << shader_id << "\n"; );
+
+ bc_parser parser(*ctx, bc, pshader, dump_source_bytecode, optimize);
+
+ if ((r = parser.parse())) {
+ assert(0);
+ return r;
+ }
+
/* skip some shaders (use shaders from default backend)
* dskip_start - range start, dskip_end - range_end,
* e.g. start = 5, end = 6 means shaders 5 & 6
}
}
- SB_DUMP_STAT( cerr << "\nsb: shader " << shader_id << "\n"; );
-
- bc_parser parser(*ctx, bc, pshader, dump_source_bytecode, optimize);
-
- if ((r = parser.parse())) {
- assert(0);
- return r;
- }
-
shader *sh = parser.get_shader();
SB_DUMP_PASS( cerr << "\n\n###### after parse\n"; sh->dump_ir(); );
/* Run optimization pass class `n` on the current shader `sh` and store the
 * result code in `r`.  With this change, a pass failure is reported to cerr;
 * if sb_context::no_fallback is set the error code is propagated to the
 * caller (abort-on-error debugging mode, see the sbnofallback option above),
 * otherwise the optimized shader is discarded (`delete sh`) and 0 is
 * returned so the caller falls back to the unoptimized bytecode.
 * When `dump` is nonzero, the IR is dumped after the pass under
 * SB_DUMP_PASS.
 * NOTE(review): once the added `if (r)` block returns on every nonzero
 * result, the trailing `assert(!r)` can only ever observe r == 0 and is
 * now vacuous — candidate for removal in a follow-up change. */
#define SB_RUN_PASS(n, dump) \
do { \
r = n(*sh).run(); \
+ if (r) { \
+ cerr << "sb: error (" << r << ") in the " << #n << " pass.\n"; \
+ if (sb_context::no_fallback) \
+ return r; \
+ cerr << "sb: using unoptimized bytecode...\n"; \
+ delete sh; \
+ return 0; \
+ } \
if (dump) { \
- SB_DUMP_PASS( cerr << "\n\n###### after " << #n << "\n"; sh->dump_ir();); \
+ SB_DUMP_PASS( cerr << "\n\n###### after " << #n << "\n"; \
+ sh->dump_ir();); \
} \
assert(!r); \
} while (0)
// container nodes in the correct locations for code placement
sh->create_bbs();
- SB_RUN_PASS(gcm, 0);
+ SB_RUN_PASS(gcm, 1);
sh->compute_interferences = true;
SB_RUN_PASS(liveness, 0);