#include "brw_vs.h"
#include "brw_vec4_gs.h"
#include "brw_fs.h"
+#include "brw_cfg.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
unsigned int stage;
for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
+ const struct gl_shader_compiler_options *options =
+ &ctx->ShaderCompilerOptions[stage];
struct brw_shader *shader =
(struct brw_shader *)shProg->_LinkedShaders[stage];
lower_noise(shader->base.ir);
lower_quadop_vector(shader->base.ir, false);
- bool input = true;
- bool output = stage == MESA_SHADER_FRAGMENT;
- bool temp = stage == MESA_SHADER_FRAGMENT;
- bool uniform = false;
-
bool lowered_variable_indexing =
lower_variable_index_to_cond_assign(shader->base.ir,
- input, output, temp, uniform);
+ options->EmitNoIndirectInput,
+ options->EmitNoIndirectOutput,
+ options->EmitNoIndirectTemp,
+ options->EmitNoIndirectUniform);
if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
perf_debug("Unsupported form of variable indexing in FS; falling "
"back to very inefficient code generation\n");
}
- /* FINISHME: Do this before the variable index lowering. */
lower_ubo_reference(&shader->base, shader->base.ir);
do {
false /* loops */
) || progress;
- progress = do_common_optimization(shader->base.ir, true, true, 32,
- &ctx->ShaderCompilerOptions[stage])
+ progress = do_common_optimization(shader->base.ir, true, true,
+ options, ctx->Const.NativeIntegers)
|| progress;
} while (progress);
_mesa_reference_program(ctx, &prog, NULL);
- if (ctx->Shader.Flags & GLSL_DUMP) {
+ if (ctx->_Shader->Flags & GLSL_DUMP) {
fprintf(stderr, "\n");
fprintf(stderr, "GLSL IR for linked %s program %d:\n",
_mesa_shader_stage_to_string(shader->base.Stage),
}
}
- if ((ctx->Shader.Flags & GLSL_DUMP) && shProg->Name != 0) {
+ if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
for (unsigned i = 0; i < shProg->NumShaders; i++) {
const struct gl_shader *sh = shProg->Shaders[i];
if (!sh)
}
}
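+/* Common state shared by the scalar (FS) and vec4 backend visitors: the
+ * context, the linked shader and program being compiled, and the stage's
+ * prog_data.
+ */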
+backend_visitor::backend_visitor(struct brw_context *brw,
+ struct gl_shader_program *shader_prog,
+ struct gl_program *prog,
+ struct brw_stage_prog_data *stage_prog_data,
+ gl_shader_stage stage)
+ : brw(brw),
+ ctx(&brw->ctx),
+ shader(shader_prog ?
+ (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
+ shader_prog(shader_prog),
+ prog(prog),
+ stage_prog_data(stage_prog_data)
+{
+}
+
bool
-backend_instruction::is_tex()
+backend_instruction::is_tex() const
{
return (opcode == SHADER_OPCODE_TEX ||
opcode == FS_OPCODE_TXB ||
}
bool
-backend_instruction::is_math()
+backend_instruction::is_math() const
{
return (opcode == SHADER_OPCODE_RCP ||
opcode == SHADER_OPCODE_RSQ ||
}
bool
-backend_instruction::is_control_flow()
+backend_instruction::is_control_flow() const
{
switch (opcode) {
case BRW_OPCODE_DO:
}
bool
-backend_instruction::can_do_source_mods()
+backend_instruction::can_do_source_mods() const
{
switch (opcode) {
case BRW_OPCODE_ADDC:
}
bool
-backend_instruction::can_do_saturate()
+backend_instruction::can_do_saturate() const
{
switch (opcode) {
case BRW_OPCODE_ADD:
}
}
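+/* Opcodes that read the accumulator register as an implicit source, in
+ * addition to their explicit source operands.
+ */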
+bool
+backend_instruction::reads_accumulator_implicitly() const
+{
+ switch (opcode) {
+ case BRW_OPCODE_MAC:
+ case BRW_OPCODE_MACH:
+ case BRW_OPCODE_SADA2:
+ return true;
+ default:
+ return false;
+ }
+}
+
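+/* Prior to Gen6, arithmetic instructions implicitly update the accumulator;
+ * on Gen6+ only instructions that explicitly enable it (writes_accumulator)
+ * do.
+ */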
+bool
+backend_instruction::writes_accumulator_implicitly(int gen) const
+{
+ return writes_accumulator ||
+ (gen < 6 &&
+ ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
+ (opcode >= FS_OPCODE_DDX && opcode <= FS_OPCODE_LINTERP &&
+ opcode != FS_OPCODE_CINTERP)));
+}
+
bool
backend_instruction::has_side_effects() const
{
/* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}
+
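+/* Append an annotation for 'inst' at the given offset in the generated
+ * code, growing the annotation array as needed and recording basic-block
+ * boundaries from the CFG.
+ */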
+void
+annotate(struct brw_context *brw,
+ struct annotation_info *annotation, cfg_t *cfg,
+ backend_instruction *inst, unsigned offset)
+{
+ if (annotation->ann_size <= annotation->ann_count) {
+ annotation->ann_size = MAX2(1024, annotation->ann_size * 2);
+ annotation->ann = reralloc(annotation->mem_ctx, annotation->ann,
+ struct annotation, annotation->ann_size);
+ if (!annotation->ann)
+ return;
+ }
+
+ struct annotation *ann = &annotation->ann[annotation->ann_count++];
+ ann->offset = offset;
+ ann->ir = inst->ir;
+ ann->annotation = inst->annotation;
+
+ if (cfg->blocks[annotation->cur_block]->start == inst) {
+ ann->block_start = cfg->blocks[annotation->cur_block];
+ }
+
+ /* There is no hardware DO instruction on Gen6+, so since DO always
+ * starts a basic block, we need to set the .block_start of the next
+ * instruction's annotation with a pointer to the bblock started by
+ * the DO.
+ *
+ * The only complication is that this would leave an annotation with no
+ * corresponding hardware instruction to disassemble, so we simply drop
+ * that annotation here.
+ */
+ if (brw->gen >= 6 && inst->opcode == BRW_OPCODE_DO) {
+ annotation->ann_count--;
+ }
+
+ if (cfg->blocks[annotation->cur_block]->end == inst) {
+ ann->block_end = cfg->blocks[annotation->cur_block];
+ annotation->cur_block++;
+ }
+}
+
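+/* Record the offset just past the last generated instruction so the size
+ * of the final annotated instruction can be computed.
+ */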
+void
+annotation_finalize(struct annotation_info *annotation,
+ unsigned next_inst_offset)
+{
+ if (!annotation->ann_count)
+ return;
+
+ if (annotation->ann_count == annotation->ann_size) {
+ annotation->ann = reralloc(annotation->mem_ctx, annotation->ann,
+ struct annotation, annotation->ann_size + 1);
+ if (!annotation->ann)
+ return;
+ }
+ annotation->ann[annotation->ann_count].offset = next_inst_offset;
+}