enum
{
/* Shader logging options: */
- DBG_VS = PIPE_SHADER_VERTEX,
- DBG_PS = PIPE_SHADER_FRAGMENT,
- DBG_GS = PIPE_SHADER_GEOMETRY,
- DBG_TCS = PIPE_SHADER_TESS_CTRL,
- DBG_TES = PIPE_SHADER_TESS_EVAL,
- DBG_CS = PIPE_SHADER_COMPUTE,
+ DBG_VS = MESA_SHADER_VERTEX,
+ DBG_TCS = MESA_SHADER_TESS_CTRL,
+ DBG_TES = MESA_SHADER_TESS_EVAL,
+ DBG_GS = MESA_SHADER_GEOMETRY,
+ DBG_PS = MESA_SHADER_FRAGMENT,
+ DBG_CS = MESA_SHADER_COMPUTE,
DBG_NO_IR,
DBG_NO_NIR,
DBG_NO_ASM,
return vs->current ? vs->current : NULL;
}
-static inline bool si_can_dump_shader(struct si_screen *sscreen, unsigned processor)
+static inline bool si_can_dump_shader(struct si_screen *sscreen, gl_shader_stage stage)
{
- return sscreen->debug_flags & (1 << processor);
+ return sscreen->debug_flags & (1 << stage);
}
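
With DBG_VS..DBG_CS now aliasing the gl_shader_stage values, si_can_dump_shader() stays a plain per-stage bit test. A standalone sketch of that pattern, not part of the patch; only the enum ordering mirrors gl_shader_stage, all other names and values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the gl_shader_stage ordering used by the DBG_* aliases above. */
enum stage { STAGE_VS, STAGE_TCS, STAGE_TES, STAGE_GS, STAGE_PS, STAGE_CS };

/* Same shape as si_can_dump_shader(): one debug bit per shader stage. */
static bool can_dump_shader(uint64_t debug_flags, enum stage stage)
{
   return debug_flags & (1ull << stage);
}

int main(void)
{
   /* e.g. the user enabled fragment- and compute-shader dumping */
   uint64_t flags = (1ull << STAGE_PS) | (1ull << STAGE_CS);

   printf("%d\n", can_dump_shader(flags, STAGE_PS)); /* prints 1 */
   printf("%d\n", can_dump_shader(flags, STAGE_VS)); /* prints 0 */
   return 0;
}
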
static inline bool si_get_strmout_en(struct si_context *sctx)
{
const struct ac_shader_config *conf = &shader->config;
- if (!check_debug_option || si_can_dump_shader(sscreen, shader->selector->type)) {
+ if (!check_debug_option || si_can_dump_shader(sscreen, shader->selector->info.stage)) {
if (shader->selector->info.stage == MESA_SHADER_FRAGMENT) {
fprintf(file,
"*** SHADER CONFIG ***\n"
struct pipe_debug_callback *debug, FILE *file, bool check_debug_option)
{
- enum pipe_shader_type shader_type = shader->selector->type;
+ gl_shader_stage stage = shader->selector->info.stage;
- if (!check_debug_option || si_can_dump_shader(sscreen, shader_type))
+ if (!check_debug_option || si_can_dump_shader(sscreen, stage))
si_dump_shader_key(shader, file);
if (!check_debug_option && shader->binary.llvm_ir_string) {
}
if (!check_debug_option ||
- (si_can_dump_shader(sscreen, shader_type) && !(sscreen->debug_flags & DBG(NO_ASM)))) {
+ (si_can_dump_shader(sscreen, stage) && !(sscreen->debug_flags & DBG(NO_ASM)))) {
unsigned wave_size = si_get_shader_wave_size(shader);
fprintf(file, "\n%s:\n", si_get_shader_name(shader));
/* Post-optimization transformations and analysis. */
si_optimize_vs_outputs(&ctx);
- if ((debug && debug->debug_message) || si_can_dump_shader(sscreen, ctx.type)) {
+ if ((debug && debug->debug_message) || si_can_dump_shader(sscreen, ctx.stage)) {
ctx.shader->info.private_mem_vgprs = ac_count_scratch_private_memory(ctx.main_fn);
}
/* Dump NIR before doing NIR->LLVM conversion in case the
* conversion fails. */
- if (si_can_dump_shader(sscreen, sel->type) && !(sscreen->debug_flags & DBG(NO_NIR))) {
+ if (si_can_dump_shader(sscreen, sel->info.stage) &&
+ !(sscreen->debug_flags & DBG(NO_NIR))) {
nir_print_shader(nir, stderr);
si_dump_streamout(&sel->so);
}
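
The NIR dump uses the same gating, keyed on the stage's debug bit plus DBG(NO_NIR). A minimal standalone sketch, assuming only the public nir_print_shader(nir_shader *, FILE *) helper; the wrapper name and its boolean parameters are hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include "nir.h"

/* Illustrative wrapper: print the NIR to stderr while it is still available,
 * i.e. before a later NIR->LLVM conversion could abort. */
static void dump_nir_if_requested(nir_shader *nir, bool stage_debug_enabled, bool no_nir)
{
   if (stage_debug_enabled && !no_nir)
      nir_print_shader(nir, stderr);
}
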
{
unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
- if (si_can_dump_shader(sscreen, shader_type)) {
+ if (si_can_dump_shader(sscreen, tgsi_processor_to_shader_stage(shader_type))) {
fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
void si_llvm_optimize_module(struct si_shader_context *ctx)
{
/* Dump LLVM IR before any optimization passes */
- if (ctx->screen->debug_flags & DBG(PREOPT_IR) && si_can_dump_shader(ctx->screen, ctx->type))
+ if (ctx->screen->debug_flags & DBG(PREOPT_IR) && si_can_dump_shader(ctx->screen, ctx->stage))
LLVMDumpModule(ctx->ac.module);
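
LLVMDumpModule() is the LLVM-C entry point that prints a module's textual IR to stderr. A hedged standalone sketch of the pre-optimization dump, with an illustrative wrapper that is not part of the patch:

#include <stdbool.h>
#include <llvm-c/Core.h>

/* Illustrative helper: emit the IR before any passes run, so the dump is not
 * affected by later optimizations. */
static void dump_preopt_ir(LLVMModuleRef module, bool preopt_ir_requested)
{
   if (preopt_ir_requested)
      LLVMDumpModule(module);
}
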
/* Run the pass */
bool ok = false;
if (si_compile_llvm(sscreen, &ctx.shader->binary, &ctx.shader->config, ctx.compiler, &ctx.ac,
debug, PIPE_SHADER_GEOMETRY, "GS Copy Shader", false)) {
- if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
+ if (si_can_dump_shader(sscreen, MESA_SHADER_GEOMETRY))
fprintf(stderr, "GS Copy Shader:\n");
si_shader_dump(sscreen, ctx.shader, debug, stderr, true);
struct util_async_debug_callback async_debug;
bool debug = (sctx->debug.debug_message && !sctx->debug.async) || sctx->is_debug ||
- si_can_dump_shader(sctx->screen, pipe_shader_type_from_mesa(stage));
+ si_can_dump_shader(sctx->screen, stage);
if (debug) {
u_async_debug_init(&async_debug);