#include "main/macros.h"
#include "brw_context.h"
}
+#include "brw_vs.h"
+#include "brw_vec4_gs.h"
#include "brw_fs.h"
-#include "../glsl/ir_optimization.h"
-#include "../glsl/ir_print_visitor.h"
+#include "brw_cfg.h"
+#include "glsl/ir_optimization.h"
+#include "glsl/glsl_parser_extras.h"
+#include "main/shaderapi.h"
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
shader = rzalloc(NULL, struct brw_shader);
if (shader) {
shader->base.Type = type;
+ shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
shader->base.Name = name;
_mesa_init_shader(ctx, &shader->base);
}
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
- struct brw_shader_program *prog;
- prog = rzalloc(NULL, struct brw_shader_program);
+ struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
if (prog) {
- prog->base.Name = name;
- _mesa_init_shader_program(ctx, &prog->base);
+ prog->Name = name;
+ _mesa_init_shader_program(ctx, prog);
}
- return &prog->base;
+ return prog;
}
/**
* what non-orthogonal state will be set, in the hope that it reflects
* the eventual NOS used, and thus allows us to produce link failures.
*/
-bool
+static bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
- if (!brw_fs_precompile(ctx, prog))
+ struct brw_context *brw = brw_context(ctx);
+
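+ /* brw->precompile mirrors the shader_precompile driconf option; when the
+ * user disables it, these eager per-stage compiles are skipped entirely.
+ */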
+ if (brw->precompile && !brw_fs_precompile(ctx, prog))
+ return false;
+
+ if (brw->precompile && !brw_gs_precompile(ctx, prog))
+ return false;
+
+ if (brw->precompile && !brw_vs_precompile(ctx, prog))
return false;
return true;
}
+static void
+brw_lower_packing_builtins(struct brw_context *brw,
+ gl_shader_stage shader_type,
+ exec_list *ir)
+{
+ int ops = LOWER_PACK_SNORM_2x16
+ | LOWER_UNPACK_SNORM_2x16
+ | LOWER_PACK_UNORM_2x16
+ | LOWER_UNPACK_UNORM_2x16
+ | LOWER_PACK_SNORM_4x8
+ | LOWER_UNPACK_SNORM_4x8
+ | LOWER_PACK_UNORM_4x8
+ | LOWER_UNPACK_UNORM_4x8;
+
+ if (brw->gen >= 7) {
+ /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
+ * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
+ * lowering is needed. For SOA code, the Half2x16 ops must be
+ * scalarized.
+ */
+ if (shader_type == MESA_SHADER_FRAGMENT) {
+ ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
+ | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
+ }
+ } else {
+ ops |= LOWER_PACK_HALF_2x16
+ | LOWER_UNPACK_HALF_2x16;
+ }
+
+ lower_packing_builtins(ir, ops);
+}
+
GLboolean
-brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
+brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_context *intel = &brw->intel;
unsigned int stage;
- for (stage = 0; stage < ARRAY_SIZE(prog->_LinkedShaders); stage++) {
+ for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
+ const struct gl_shader_compiler_options *options =
+ &ctx->ShaderCompilerOptions[stage];
struct brw_shader *shader =
- (struct brw_shader *)prog->_LinkedShaders[stage];
+ (struct brw_shader *)shProg->_LinkedShaders[stage];
if (!shader)
continue;
- void *mem_ctx = ralloc_context(NULL);
- bool progress;
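+ /* Create a gl_program for this stage; it holds the parameter list and
+ * other linked metadata, and is attached to the gl_shader below via
+ * _mesa_reference_program().
+ */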
+ struct gl_program *prog =
+ ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
+ shader->base.Name);
+ if (!prog)
+ return false;
+ prog->Parameters = _mesa_new_parameter_list();
- if (shader->ir)
- ralloc_free(shader->ir);
- shader->ir = new(shader) exec_list;
- clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
+ _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);
- do_mat_op_to_vec(shader->ir);
- lower_instructions(shader->ir,
+ bool progress;
+
+ /* lower_packing_builtins() inserts arithmetic instructions, so it
+ * must precede lower_instructions().
+ */
+ brw_lower_packing_builtins(brw, (gl_shader_stage) stage, shader->base.ir);
+ do_mat_op_to_vec(shader->base.ir);
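+ /* Gen7+ can implement bitfieldInsert() directly with the BFM/BFI
+ * instructions; earlier hardware never sees the opcode, since the
+ * extensions that generate it aren't exposed there.
+ */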
+ const int bitfield_insert = brw->gen >= 7
+ ? BITFIELD_INSERT_TO_BFM_BFI
+ : 0;
+ lower_instructions(shader->base.ir,
MOD_TO_FRACT |
DIV_TO_MUL_RCP |
SUB_TO_ADD_NEG |
EXP_TO_EXP2 |
- LOG_TO_LOG2);
+ LOG_TO_LOG2 |
+ bitfield_insert |
+ LDEXP_TO_ARITH);
/* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
* if-statements need to be flattened.
*/
- if (intel->gen < 6)
- lower_if_to_cond_assign(shader->ir, 16);
+ if (brw->gen < 6)
+ lower_if_to_cond_assign(shader->base.ir, 16);
- do_lower_texture_projection(shader->ir);
- do_vec_index_to_cond_assign(shader->ir);
- brw_do_cubemap_normalize(shader->ir);
- lower_noise(shader->ir);
- lower_quadop_vector(shader->ir, false);
+ do_lower_texture_projection(shader->base.ir);
+ brw_lower_texture_gradients(brw, shader->base.ir);
+ do_vec_index_to_cond_assign(shader->base.ir);
+ lower_vector_insert(shader->base.ir, true);
+ brw_do_cubemap_normalize(shader->base.ir);
+ lower_offset_arrays(shader->base.ir);
+ brw_do_lower_unnormalized_offset(shader->base.ir);
+ lower_noise(shader->base.ir);
+ lower_quadop_vector(shader->base.ir, false);
- bool input = true;
- bool output = stage == MESA_SHADER_FRAGMENT;
- bool temp = stage == MESA_SHADER_FRAGMENT;
- bool uniform = true;
+ bool lowered_variable_indexing =
+ lower_variable_index_to_cond_assign(shader->base.ir,
+ options->EmitNoIndirectInput,
+ options->EmitNoIndirectOutput,
+ options->EmitNoIndirectTemp,
+ options->EmitNoIndirectUniform);
- lower_variable_index_to_cond_assign(shader->ir,
- input, output, temp, uniform);
+ if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
+ perf_debug("Unsupported form of variable indexing in %s; falling "
+ "back to very inefficient code generation\n",
+ _mesa_shader_stage_to_string(shader->base.Stage));
+ }
+
+ lower_ubo_reference(&shader->base, shader->base.ir);
do {
progress = false;
if (stage == MESA_SHADER_FRAGMENT) {
- brw_do_channel_expressions(shader->ir);
- brw_do_vector_splitting(shader->ir);
+ brw_do_channel_expressions(shader->base.ir);
+ brw_do_vector_splitting(shader->base.ir);
}
- progress = do_lower_jumps(shader->ir, true, true,
+ progress = do_lower_jumps(shader->base.ir, true, true,
true, /* main return */
false, /* continue */
false /* loops */
) || progress;
- progress = do_common_optimization(shader->ir, true, 32) || progress;
+ progress = do_common_optimization(shader->base.ir, true, true,
+ options, ctx->Const.NativeIntegers)
+ || progress;
} while (progress);
- validate_ir_tree(shader->ir);
+ /* Make a pass over the IR to add state references for any built-in
+ * uniforms that are used. This has to be done now (during linking).
+ * Code generation doesn't happen until the first time this shader is
+ * used for rendering. Waiting until then to generate the parameters is
+ * too late. At that point, the values for the built-in uniforms won't
+ * get sent to the shader.
+ */
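+ /* For instance, a shader that reads gl_ModelViewProjectionMatrix gets a
+ * STATE_MVP_MATRIX parameter added here, so the current matrix is
+ * uploaded along with the other program constants at draw time.
+ */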
+ foreach_list(node, shader->base.ir) {
+ ir_variable *var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->data.mode != ir_var_uniform)
+ || (strncmp(var->name, "gl_", 3) != 0))
+ continue;
- reparent_ir(shader->ir, shader->ir);
- ralloc_free(mem_ctx);
+ const ir_state_slot *const slots = var->state_slots;
+ assert(var->state_slots != NULL);
+
+ for (unsigned int i = 0; i < var->num_state_slots; i++) {
+ _mesa_add_state_reference(prog->Parameters,
+ (gl_state_index *) slots[i].tokens);
+ }
+ }
+
+ validate_ir_tree(shader->base.ir);
+
+ do_set_program_inouts(shader->base.ir, prog, shader->base.Stage);
+
+ prog->SamplersUsed = shader->base.active_samplers;
+ _mesa_update_shader_textures_used(shProg, prog);
+
+ _mesa_reference_program(ctx, &shader->base.Program, prog);
+
+ brw_add_texrect_params(prog);
+
+ /* This has to be done last. Any operation that can cause
+ * prog->ParameterValues to get reallocated (e.g., anything that adds a
+ * program constant) has to happen before creating this linkage.
+ */
+ _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
+
+ _mesa_reference_program(ctx, &prog, NULL);
+
+ if (ctx->_Shader->Flags & GLSL_DUMP) {
+ fprintf(stderr, "\n");
+ fprintf(stderr, "GLSL IR for linked %s program %d:\n",
+ _mesa_shader_stage_to_string(shader->base.Stage),
+ shProg->Name);
+ _mesa_print_ir(stderr, shader->base.ir, NULL);
+ fprintf(stderr, "\n");
+ }
}
- if (!_mesa_ir_link_shader(ctx, prog))
- return GL_FALSE;
+ if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
+ for (unsigned i = 0; i < shProg->NumShaders; i++) {
+ const struct gl_shader *sh = shProg->Shaders[i];
+ if (!sh)
+ continue;
+
+ fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
+ _mesa_shader_stage_to_string(sh->Stage),
+ i, shProg->Name);
+ fprintf(stderr, "%s", sh->Source);
+ fprintf(stderr, "\n");
+ }
+ }
- if (!brw_shader_precompile(ctx, prog))
- return GL_FALSE;
+ if (!brw_shader_precompile(ctx, shProg))
+ return false;
- return GL_TRUE;
+ return true;
}
case GLSL_TYPE_UINT:
return BRW_REGISTER_TYPE_UD;
case GLSL_TYPE_ARRAY:
+ return brw_type_for_base_type(type->fields.array);
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_ATOMIC_UINT:
/* These should be overridden with the type of the member when
* dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
* way to trip up if we don't.
*/
return BRW_REGISTER_TYPE_UD;
- default:
+ case GLSL_TYPE_IMAGE:
+ return BRW_REGISTER_TYPE_UD;
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_INTERFACE:
assert(!"not reached");
- return BRW_REGISTER_TYPE_F;
+ break;
}
+
+ return BRW_REGISTER_TYPE_F;
}
uint32_t
return BRW_MATH_FUNCTION_SIN;
case SHADER_OPCODE_COS:
return BRW_MATH_FUNCTION_COS;
+ case SHADER_OPCODE_INT_QUOTIENT:
+ return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
+ case SHADER_OPCODE_INT_REMAINDER:
+ return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
default:
assert(!"not reached: unknown math function");
return 0;
}
}
+
+uint32_t
+brw_texture_offset(struct gl_context *ctx, ir_constant *offset)
+{
+ /* If the driver does not support GL_ARB_gpu_shader5, the offset
+ * must be constant.
+ */
+ assert(offset != NULL || ctx->Extensions.ARB_gpu_shader5);
+
+ if (!offset) return 0; /* nonconstant offset; caller will handle it. */
+
+ signed char offsets[3];
+ for (unsigned i = 0; i < offset->type->vector_elements; i++)
+ offsets[i] = (signed char) offset->value.i[i];
+
+ /* Combine all three offsets into a single unsigned dword:
+ *
+ * bits 11:8 - U Offset (X component)
+ * bits 7:4 - V Offset (Y component)
+ * bits 3:0 - R Offset (Z component)
+ */
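+ /* For example, a constant offset of ivec3(1, -2, 3) packs as
+ * (1 << 8) | ((-2 & 0xF) << 4) | (3 << 0) == 0x1E3.
+ */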
+ unsigned offset_bits = 0;
+ for (unsigned i = 0; i < offset->type->vector_elements; i++) {
+ const unsigned shift = 4 * (2 - i);
+ offset_bits |= (offsets[i] << shift) & (0xF << shift);
+ }
+ return offset_bits;
+}
+
+const char *
+brw_instruction_name(enum opcode op)
+{
+ char *fallback;
+
+ if (op < ARRAY_SIZE(opcode_descs) && opcode_descs[op].name)
+ return opcode_descs[op].name;
+
+ switch (op) {
+ case FS_OPCODE_FB_WRITE:
+ return "fb_write";
+ case FS_OPCODE_BLORP_FB_WRITE:
+ return "blorp_fb_write";
+
+ case SHADER_OPCODE_RCP:
+ return "rcp";
+ case SHADER_OPCODE_RSQ:
+ return "rsq";
+ case SHADER_OPCODE_SQRT:
+ return "sqrt";
+ case SHADER_OPCODE_EXP2:
+ return "exp2";
+ case SHADER_OPCODE_LOG2:
+ return "log2";
+ case SHADER_OPCODE_POW:
+ return "pow";
+ case SHADER_OPCODE_INT_QUOTIENT:
+ return "int_quot";
+ case SHADER_OPCODE_INT_REMAINDER:
+ return "int_rem";
+ case SHADER_OPCODE_SIN:
+ return "sin";
+ case SHADER_OPCODE_COS:
+ return "cos";
+
+ case SHADER_OPCODE_TEX:
+ return "tex";
+ case SHADER_OPCODE_TXD:
+ return "txd";
+ case SHADER_OPCODE_TXF:
+ return "txf";
+ case SHADER_OPCODE_TXL:
+ return "txl";
+ case SHADER_OPCODE_TXS:
+ return "txs";
+ case FS_OPCODE_TXB:
+ return "txb";
+ case SHADER_OPCODE_TXF_CMS:
+ return "txf_cms";
+ case SHADER_OPCODE_TXF_UMS:
+ return "txf_ums";
+ case SHADER_OPCODE_TXF_MCS:
+ return "txf_mcs";
+ case SHADER_OPCODE_TG4:
+ return "tg4";
+ case SHADER_OPCODE_TG4_OFFSET:
+ return "tg4_offset";
+
+ case SHADER_OPCODE_GEN4_SCRATCH_READ:
+ return "gen4_scratch_read";
+ case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
+ return "gen4_scratch_write";
+ case SHADER_OPCODE_GEN7_SCRATCH_READ:
+ return "gen7_scratch_read";
+
+ case FS_OPCODE_DDX:
+ return "ddx";
+ case FS_OPCODE_DDY:
+ return "ddy";
+
+ case FS_OPCODE_PIXEL_X:
+ return "pixel_x";
+ case FS_OPCODE_PIXEL_Y:
+ return "pixel_y";
+
+ case FS_OPCODE_CINTERP:
+ return "cinterp";
+ case FS_OPCODE_LINTERP:
+ return "linterp";
+
+ case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
+ return "uniform_pull_const";
+ case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
+ return "uniform_pull_const_gen7";
+ case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
+ return "varying_pull_const";
+ case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
+ return "varying_pull_const_gen7";
+
+ case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
+ return "mov_dispatch_to_flags";
+ case FS_OPCODE_DISCARD_JUMP:
+ return "discard_jump";
+
+ case FS_OPCODE_SET_SIMD4X2_OFFSET:
+ return "set_simd4x2_offset";
+
+ case FS_OPCODE_PACK_HALF_2x16_SPLIT:
+ return "pack_half_2x16_split";
+ case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
+ return "unpack_half_2x16_split_x";
+ case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
+ return "unpack_half_2x16_split_y";
+
+ case FS_OPCODE_PLACEHOLDER_HALT:
+ return "placeholder_halt";
+
+ case VS_OPCODE_URB_WRITE:
+ return "vs_urb_write";
+ case VS_OPCODE_PULL_CONSTANT_LOAD:
+ return "pull_constant_load";
+ case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
+ return "pull_constant_load_gen7";
+ case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
+ return "unpack_flags_simd4x2";
+
+ case GS_OPCODE_URB_WRITE:
+ return "gs_urb_write";
+ case GS_OPCODE_THREAD_END:
+ return "gs_thread_end";
+ case GS_OPCODE_SET_WRITE_OFFSET:
+ return "set_write_offset";
+ case GS_OPCODE_SET_VERTEX_COUNT:
+ return "set_vertex_count";
+ case GS_OPCODE_SET_DWORD_2_IMMED:
+ return "set_dword_2_immed";
+ case GS_OPCODE_PREPARE_CHANNEL_MASKS:
+ return "prepare_channel_masks";
+ case GS_OPCODE_SET_CHANNEL_MASKS:
+ return "set_channel_masks";
+ case GS_OPCODE_GET_INSTANCE_ID:
+ return "get_instance_id";
+
+ default:
+ /* Yes, this leaks. It's in debug code, it should never occur, and if
+ * it does, you should just add the case to the list above.
+ */
+ asprintf(&fallback, "op%d", op);
+ return fallback;
+ }
+}
+
+backend_visitor::backend_visitor(struct brw_context *brw,
+ struct gl_shader_program *shader_prog,
+ struct gl_program *prog,
+ struct brw_stage_prog_data *stage_prog_data,
+ gl_shader_stage stage)
+ : brw(brw),
+ ctx(&brw->ctx),
+ shader(shader_prog ?
+ (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
+ shader_prog(shader_prog),
+ prog(prog),
+ stage_prog_data(stage_prog_data)
+{
+}
+
+bool
+backend_instruction::is_tex() const
+{
+ return (opcode == SHADER_OPCODE_TEX ||
+ opcode == FS_OPCODE_TXB ||
+ opcode == SHADER_OPCODE_TXD ||
+ opcode == SHADER_OPCODE_TXF ||
+ opcode == SHADER_OPCODE_TXF_CMS ||
+ opcode == SHADER_OPCODE_TXF_UMS ||
+ opcode == SHADER_OPCODE_TXF_MCS ||
+ opcode == SHADER_OPCODE_TXL ||
+ opcode == SHADER_OPCODE_TXS ||
+ opcode == SHADER_OPCODE_LOD ||
+ opcode == SHADER_OPCODE_TG4 ||
+ opcode == SHADER_OPCODE_TG4_OFFSET);
+}
+
+bool
+backend_instruction::is_math() const
+{
+ return (opcode == SHADER_OPCODE_RCP ||
+ opcode == SHADER_OPCODE_RSQ ||
+ opcode == SHADER_OPCODE_SQRT ||
+ opcode == SHADER_OPCODE_EXP2 ||
+ opcode == SHADER_OPCODE_LOG2 ||
+ opcode == SHADER_OPCODE_SIN ||
+ opcode == SHADER_OPCODE_COS ||
+ opcode == SHADER_OPCODE_INT_QUOTIENT ||
+ opcode == SHADER_OPCODE_INT_REMAINDER ||
+ opcode == SHADER_OPCODE_POW);
+}
+
+bool
+backend_instruction::is_control_flow() const
+{
+ switch (opcode) {
+ case BRW_OPCODE_DO:
+ case BRW_OPCODE_WHILE:
+ case BRW_OPCODE_IF:
+ case BRW_OPCODE_ELSE:
+ case BRW_OPCODE_ENDIF:
+ case BRW_OPCODE_BREAK:
+ case BRW_OPCODE_CONTINUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+backend_instruction::can_do_source_mods() const
+{
+ switch (opcode) {
+ case BRW_OPCODE_ADDC:
+ case BRW_OPCODE_BFE:
+ case BRW_OPCODE_BFI1:
+ case BRW_OPCODE_BFI2:
+ case BRW_OPCODE_BFREV:
+ case BRW_OPCODE_CBIT:
+ case BRW_OPCODE_FBH:
+ case BRW_OPCODE_FBL:
+ case BRW_OPCODE_SUBB:
+ return false;
+ default:
+ return true;
+ }
+}
+
+bool
+backend_instruction::can_do_saturate() const
+{
+ switch (opcode) {
+ case BRW_OPCODE_ADD:
+ case BRW_OPCODE_ASR:
+ case BRW_OPCODE_AVG:
+ case BRW_OPCODE_DP2:
+ case BRW_OPCODE_DP3:
+ case BRW_OPCODE_DP4:
+ case BRW_OPCODE_DPH:
+ case BRW_OPCODE_F16TO32:
+ case BRW_OPCODE_F32TO16:
+ case BRW_OPCODE_LINE:
+ case BRW_OPCODE_LRP:
+ case BRW_OPCODE_MAC:
+ case BRW_OPCODE_MACH:
+ case BRW_OPCODE_MAD:
+ case BRW_OPCODE_MATH:
+ case BRW_OPCODE_MOV:
+ case BRW_OPCODE_MUL:
+ case BRW_OPCODE_PLN:
+ case BRW_OPCODE_RNDD:
+ case BRW_OPCODE_RNDE:
+ case BRW_OPCODE_RNDU:
+ case BRW_OPCODE_RNDZ:
+ case BRW_OPCODE_SEL:
+ case BRW_OPCODE_SHL:
+ case BRW_OPCODE_SHR:
+ case FS_OPCODE_LINTERP:
+ case SHADER_OPCODE_COS:
+ case SHADER_OPCODE_EXP2:
+ case SHADER_OPCODE_LOG2:
+ case SHADER_OPCODE_POW:
+ case SHADER_OPCODE_RCP:
+ case SHADER_OPCODE_RSQ:
+ case SHADER_OPCODE_SIN:
+ case SHADER_OPCODE_SQRT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+backend_instruction::reads_accumulator_implicitly() const
+{
+ switch (opcode) {
+ case BRW_OPCODE_MAC:
+ case BRW_OPCODE_MACH:
+ case BRW_OPCODE_SADA2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+backend_instruction::writes_accumulator_implicitly(int gen) const
+{
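+ /* Before Gen6, most arithmetic instructions (the ADD..NOP opcode range)
+ * implicitly update the accumulator. The DDX..LINTERP virtual opcodes
+ * expand to such arithmetic as well, except for CINTERP, which becomes a
+ * plain MOV.
+ */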
+ return writes_accumulator ||
+ (gen < 6 &&
+ ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
+ (opcode >= FS_OPCODE_DDX && opcode <= FS_OPCODE_LINTERP &&
+ opcode != FS_OPCODE_CINTERP)));
+}
+
+bool
+backend_instruction::has_side_effects() const
+{
+ switch (opcode) {
+ case SHADER_OPCODE_UNTYPED_ATOMIC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void
+backend_visitor::dump_instructions()
+{
+ int ip = 0;
+ foreach_list(node, &this->instructions) {
+ backend_instruction *inst = (backend_instruction *)node;
+ fprintf(stderr, "%d: ", ip++);
+ dump_instruction(inst);
+ }
+}
+
+
+/**
+ * Sets up the starting offsets for the groups of binding table entries
+ * common to all pipeline stages.
+ *
+ * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that
+ * they're unused, and to ensure that adding small offsets to them will
+ * trigger the asserts that surface indices are < BRW_MAX_SURFACES.
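+ *
+ * For example, called with next_binding_table_offset == N for a shader
+ * using samplers 0 and 1 and one UBO (and no shader_time, gather, or
+ * atomic buffers), this yields texture_start == N, ubo_start == N + 2,
+ * and pull_constants_start == N + 3.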
+ */
+void
+backend_visitor::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
+{
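+ /* _mesa_fls() (find last set) returns one more than the index of the
+ * highest sampler in use, so holes in SamplersUsed still get entries.
+ */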
+ int num_textures = _mesa_fls(prog->SamplersUsed);
+
+ stage_prog_data->binding_table.texture_start = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+
+ if (shader) {
+ stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
+ next_binding_table_offset += shader->base.NumUniformBlocks;
+ } else {
+ stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
+ }
+
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
+ stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
+ next_binding_table_offset++;
+ } else {
+ stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
+ }
+
+ if (prog->UsesGather) {
+ stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+ } else {
+ stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
+ }
+
+ if (shader_prog && shader_prog->NumAtomicBuffers) {
+ stage_prog_data->binding_table.abo_start = next_binding_table_offset;
+ next_binding_table_offset += shader_prog->NumAtomicBuffers;
+ } else {
+ stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
+ }
+
+ /* This may or may not be used depending on how the compile goes. */
+ stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
+ next_binding_table_offset++;
+
+ assert(next_binding_table_offset <= BRW_MAX_SURFACES);
+
+ /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
+}
+
+void annotate(struct brw_context *brw,
+ struct annotation_info *annotation, cfg_t *cfg,
+ backend_instruction *inst, unsigned offset)
+{
+ if (annotation->ann_size <= annotation->ann_count) {
+ annotation->ann_size = MAX2(1024, annotation->ann_size * 2);
+ annotation->ann = reralloc(annotation->mem_ctx, annotation->ann,
+ struct annotation, annotation->ann_size);
+ if (!annotation->ann)
+ return;
+ }
+
+ struct annotation *ann = &annotation->ann[annotation->ann_count++];
+ ann->offset = offset;
+ ann->ir = inst->ir;
+ ann->annotation = inst->annotation;
+
+ if (cfg->blocks[annotation->cur_block]->start == inst) {
+ ann->block_start = cfg->blocks[annotation->cur_block];
+ }
+
+ /* There is no hardware DO instruction on Gen6+, so since DO always
+ * starts a basic block, we need to set the .block_start of the next
+ * instruction's annotation with a pointer to the bblock started by
+ * the DO.
+ *
+ * This also avoids the complication of emitting an annotation without
+ * a corresponding hardware instruction to disassemble.
+ */
+ if (brw->gen >= 6 && inst->opcode == BRW_OPCODE_DO) {
+ annotation->ann_count--;
+ }
+
+ if (cfg->blocks[annotation->cur_block]->end == inst) {
+ ann->block_end = cfg->blocks[annotation->cur_block];
+ annotation->cur_block++;
+ }
+}
+
+void
+annotation_finalize(struct annotation_info *annotation,
+ unsigned next_inst_offset)
+{
+ if (!annotation->ann_count)
+ return;
+
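+ /* Store the end-of-program offset in one extra annotation slot, so that
+ * the extent of annotation i can be computed as
+ * ann[i + 1].offset - ann[i].offset.
+ */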
+ if (annotation->ann_count == annotation->ann_size) {
+ annotation->ann = reralloc(annotation->mem_ctx, annotation->ann,
+ struct annotation, annotation->ann_size + 1);
+ }
+ annotation->ann[annotation->ann_count].offset = next_inst_offset;
+}