}
fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- v8.promoted_constants, v8.runtime_check_aads_emit,
+ v8.shader_stats, v8.runtime_check_aads_emit,
MESA_SHADER_FRAGMENT);
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
unsigned max_dispatch_width = 32;
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
- cfg_t *cfg = NULL;
+ fs_visitor *v = NULL;
const char *fail_msg = NULL;
- unsigned promoted_constants = 0;
if ((int)key->base.subgroup_size_type >= (int)BRW_SUBGROUP_SIZE_REQUIRE_8) {
/* These enum values are expressly chosen to be equal to the subgroup
/* We should always be able to do SIMD32 for compute shaders */
assert(v8->max_dispatch_width >= 32);
- cfg = v8->cfg;
+ v = v8;
cs_set_simd_size(prog_data, 8);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v8->promoted_constants;
}
}
compiler->shader_perf_log(log_data,
"SIMD16 shader failed to compile: %s",
v16->fail_msg);
- if (!cfg) {
+ if (!v) {
fail_msg =
"Couldn't generate SIMD16 program and not "
"enough threads for SIMD8";
/* We should always be able to do SIMD32 for compute shaders */
assert(v16->max_dispatch_width >= 32);
- cfg = v16->cfg;
+ v = v16;
cs_set_simd_size(prog_data, 16);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v16->promoted_constants;
}
}
compiler->shader_perf_log(log_data,
"SIMD32 shader failed to compile: %s",
v16->fail_msg);
- if (!cfg) {
+ if (!v) {
fail_msg =
"Couldn't generate SIMD32 program and not "
"enough threads for SIMD16";
}
} else {
- cfg = v32->cfg;
+ v = v32;
cs_set_simd_size(prog_data, 32);
cs_fill_push_const_info(compiler->devinfo, prog_data);
- promoted_constants = v32->promoted_constants;
}
}
const unsigned *ret = NULL;
- if (unlikely(cfg == NULL)) {
+ if (unlikely(v == NULL)) {
assert(fail_msg);
if (error_str)
*error_str = ralloc_strdup(mem_ctx, fail_msg);
} else {
fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
- promoted_constants, false, MESA_SHADER_COMPUTE);
+ v->shader_stats, v->runtime_check_aads_emit,
+ MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
src_shader->info.label ?
g.enable_debug(name);
}
- g.generate_code(cfg, prog_data->simd_size);
+ g.generate_code(v->cfg, prog_data->simd_size);
ret = g.get_assembly();
}
#define UBO_START ((1 << 16) - 4)
+/* Statistics gathered by the fs_visitor during compilation and handed to
+ * the fs_generator, which reports them via shader_perf_log / debug output.
+ */
+struct shader_stats {
+   /* Count of immediate constants promoted to GRFs by the
+    * combine-constants pass (set from the constant table length there).
+    */
+   unsigned promoted_constants;
+};
+
/**
* The fragment shader front-end.
*
int shader_time_index;
- unsigned promoted_constants;
+ struct shader_stats shader_stats;
+
brw::fs_builder bld;
private:
fs_generator(const struct brw_compiler *compiler, void *log_data,
void *mem_ctx,
struct brw_stage_prog_data *prog_data,
- unsigned promoted_constants,
+ struct shader_stats shader_stats,
bool runtime_check_aads_emit,
gl_shader_stage stage);
~fs_generator();
unsigned dispatch_width; /**< 8, 16 or 32 */
exec_list discard_halt_patches;
- unsigned promoted_constants;
+ struct shader_stats shader_stats;
bool runtime_check_aads_emit;
bool debug_flag;
const char *shader_name;
reg.offset += imm->size * width;
}
- promoted_constants = table.len;
+ shader_stats.promoted_constants = table.len;
/* Rewrite the immediate sources to refer to the new GRFs. */
for (int i = 0; i < table.len; i++) {
fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
void *mem_ctx,
struct brw_stage_prog_data *prog_data,
- unsigned promoted_constants,
+ struct shader_stats shader_stats,
bool runtime_check_aads_emit,
gl_shader_stage stage)
: compiler(compiler), log_data(log_data),
devinfo(compiler->devinfo),
prog_data(prog_data),
- promoted_constants(promoted_constants),
+ shader_stats(shader_stats),
runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
stage(stage), mem_ctx(mem_ctx)
{
"SIMD%d shader: %d instructions. %d loops. %u cycles. %d:%d spills:fills. Promoted %u constants. Compacted %d to %d"
" bytes (%.0f%%)\n",
shader_name, dispatch_width, before_size / 16, loop_count, cfg->cycle_count,
- spill_count, fill_count, promoted_constants, before_size, after_size,
+ spill_count, fill_count, shader_stats.promoted_constants, before_size, after_size,
100.0f * (before_size - after_size) / before_size);
dump_assembly(p->store, disasm_info);
_mesa_shader_stage_to_abbrev(stage),
dispatch_width, before_size / 16,
loop_count, cfg->cycle_count, spill_count,
- fill_count, promoted_constants, before_size,
+ fill_count, shader_stats.promoted_constants, before_size,
after_size);
return start_offset;
this->pull_constant_loc = NULL;
this->push_constant_loc = NULL;
- this->promoted_constants = 0,
+ this->shader_stats.promoted_constants = 0,
this->grf_used = 0;
this->spilled_any_registers = false;
prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
fs_generator g(compiler, log_data, mem_ctx,
- &prog_data->base.base, v.promoted_constants, false,
+ &prog_data->base.base, v.shader_stats, false,
MESA_SHADER_TESS_EVAL);
if (unlikely(INTEL_DEBUG & DEBUG_TES)) {
g.enable_debug(ralloc_asprintf(mem_ctx,
prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
fs_generator g(compiler, log_data, mem_ctx,
- &prog_data->base.base, v.promoted_constants,
+ &prog_data->base.base, v.shader_stats,
v.runtime_check_aads_emit, MESA_SHADER_VERTEX);
if (INTEL_DEBUG & DEBUG_VS) {
const char *debug_name =
prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
fs_generator g(compiler, log_data, mem_ctx,
- &prog_data->base.base, v.promoted_constants,
+ &prog_data->base.base, v.shader_stats,
false, MESA_SHADER_GEOMETRY);
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
const char *label =
prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
fs_generator g(compiler, log_data, mem_ctx,
- &prog_data->base.base, v.promoted_constants, false,
+ &prog_data->base.base, v.shader_stats, false,
MESA_SHADER_TESS_CTRL);
if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
g.enable_debug(ralloc_asprintf(mem_ctx,