GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
- struct intel_context *intel = intel_context(ctx);
-
struct brw_shader *shader =
(struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
if (shader != NULL) {
GL_TRUE, /* temp */
GL_TRUE /* uniform */
) || progress;
- if (intel->gen == 6) {
- progress = do_if_to_cond_assign(shader->ir) || progress;
- }
} while (progress);
validate_ir_tree(shader->ir);
}
}
-static const fs_reg reg_undef;
-static const fs_reg reg_null_f(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_F);
-static const fs_reg reg_null_d(ARF, BRW_ARF_NULL, BRW_REGISTER_TYPE_D);
-
int
fs_visitor::virtual_grf_alloc(int size)
{
return BRW_REGISTER_TYPE_UD;
case GLSL_TYPE_ARRAY:
case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_SAMPLER:
/* These should be overridden with the type of the member when
* dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
* way to trip up if we don't.
case GLSL_TYPE_BOOL:
vec_values = fp->Base.Parameters->ParameterValues[loc];
for (unsigned int i = 0; i < type->vector_elements; i++) {
- c->prog_data.param[c->prog_data.nr_params++] = &vec_values[i];
+ unsigned int param = c->prog_data.nr_params++;
+
+ assert(param < ARRAY_SIZE(c->prog_data.param));
+
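+      /* Mesa's parameter lists store every value as a float, so
+       * record the conversion needed to recover the shader's base
+       * type when these constants are uploaded (an assumption of
+       * this note: the conversion is applied at upload time).
+       */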
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
+ break;
+ case GLSL_TYPE_UINT:
+ c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
+ break;
+ case GLSL_TYPE_INT:
+ c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
+ break;
+ case GLSL_TYPE_BOOL:
+ c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
+ break;
+ }
+
+ c->prog_data.param[param] = &vec_values[i];
}
return 1;
break;
last_swiz = swiz;
+ c->prog_data.param_convert[c->prog_data.nr_params] =
+ PARAM_NO_CONVERT;
c->prog_data.param[c->prog_data.nr_params++] = &vec_values[swiz];
}
}
*reg,
fs_reg(1)));
} else {
- fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
/* bit 31 is "primitive is back face", so checking < (1 << 31) gives
* us front face
}
reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
+ reg->type = brw_type_for_base_type(ir->type);
}
if (!reg)
{
unsigned int operand;
fs_reg op[2], temp;
- fs_reg result;
fs_inst *inst;
for (operand = 0; operand < ir->get_num_operands(); operand++) {
/* Note that BRW_OPCODE_NOT is not appropriate here, since it takes
 * the ones' complement of the whole register, not just bit 0.
*/
- emit(fs_inst(BRW_OPCODE_ADD, this->result, op[0], fs_reg(-1)));
+ emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)));
break;
case ir_unop_neg:
op[0].negate = !op[0].negate;
break;
case ir_unop_bit_not:
- case ir_unop_u2f:
- case ir_binop_lshift:
- case ir_binop_rshift:
+ inst = emit(fs_inst(BRW_OPCODE_NOT, this->result, op[0]));
+ break;
case ir_binop_bit_and:
+ inst = emit(fs_inst(BRW_OPCODE_AND, this->result, op[0], op[1]));
+ break;
case ir_binop_bit_xor:
+ inst = emit(fs_inst(BRW_OPCODE_XOR, this->result, op[0], op[1]));
+ break;
case ir_binop_bit_or:
+ inst = emit(fs_inst(BRW_OPCODE_OR, this->result, op[0], op[1]));
+ break;
+
+ case ir_unop_u2f:
+ case ir_binop_lshift:
+ case ir_binop_rshift:
assert(!"GLSL 1.30 features unsupported");
break;
}
assert(!ir->projector);
sampler = _mesa_get_sampler_uniform_value(ir->sampler,
- ctx->Shader.CurrentProgram,
+ ctx->Shader.CurrentFragmentProgram,
&brw->fragment_program->Base);
sampler = c->fp->program.Base.SamplerUnits[sampler];
0
};
+ c->prog_data.param_convert[c->prog_data.nr_params] =
+ PARAM_NO_CONVERT;
+ c->prog_data.param_convert[c->prog_data.nr_params + 1] =
+ PARAM_NO_CONVERT;
+
fs_reg scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
fs_reg scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);
GLuint index = _mesa_add_state_reference(params,
}
}
+/**
+ * Emit a gen6 IF statement with the comparison folded into the IF
+ * instruction.
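+ *
+ * On gen6, the IF instruction can compare its two sources itself,
+ * taking the branch when the conditional mod passes, so we can skip
+ * the separate CMP and predicated IF that the pre-gen6 path needs.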
+ */
+void
+fs_visitor::emit_if_gen6(ir_if *ir)
+{
+ ir_expression *expr = ir->condition->as_expression();
+
+ if (expr) {
+ fs_reg op[2];
+ fs_inst *inst;
+ fs_reg temp;
+
+ for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
+ assert(expr->operands[i]->type->is_scalar());
+
+ expr->operands[i]->accept(this);
+ op[i] = this->result;
+ }
+
+ switch (expr->operation) {
+ case ir_unop_logic_not:
+      inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_Z;
+ return;
+
+ case ir_binop_logic_xor:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+
+ case ir_binop_logic_or:
+ temp = fs_reg(this, glsl_type::bool_type);
+ emit(fs_inst(BRW_OPCODE_OR, temp, op[0], op[1]));
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+
+ case ir_binop_logic_and:
+ temp = fs_reg(this, glsl_type::bool_type);
+ emit(fs_inst(BRW_OPCODE_AND, temp, op[0], op[1]));
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+
+ case ir_unop_f2b:
+      inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0.0f)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+
+ case ir_unop_i2b:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+
+ case ir_binop_greater:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_G;
+ return;
+ case ir_binop_gequal:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_GE;
+ return;
+ case ir_binop_less:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_L;
+ return;
+ case ir_binop_lequal:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_LE;
+ return;
+ case ir_binop_equal:
+ case ir_binop_all_equal:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_Z;
+ return;
+ case ir_binop_nequal:
+ case ir_binop_any_nequal:
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], op[1]));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ return;
+ default:
+ assert(!"not reached");
+ inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+ this->fail = true;
+ return;
+ }
+ return;
+ }
+
+ ir->condition->accept(this);
+
+ fs_inst *inst = emit(fs_inst(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)));
+ inst->conditional_mod = BRW_CONDITIONAL_NZ;
+}
+
void
fs_visitor::visit(ir_if *ir)
{
*/
this->base_ir = ir->condition;
- emit_bool_to_cond_code(ir->condition);
+ if (intel->gen >= 6) {
+ emit_if_gen6(ir);
+ } else {
+ emit_bool_to_cond_code(ir->condition);
- inst = emit(fs_inst(BRW_OPCODE_IF));
- inst->predicated = true;
+ inst = emit(fs_inst(BRW_OPCODE_IF));
+ inst->predicated = true;
+ }
foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
ir_instruction *ir = (ir_instruction *)iter.get();
brw_MOV(p,
brw_message_reg(inst->base_mrf),
brw_vec8_grf(0, 0));
+
+ if (inst->target > 0) {
+ /* Set the render target index for choosing BLEND_STATE. */
+ brw_MOV(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 2),
+ BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(inst->target));
+ }
+
+ /* Clear viewport index, render target array index. */
+ brw_AND(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 0, 0),
+ BRW_REGISTER_TYPE_UD),
+ retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(0xf7ff));
+
implied_header = brw_null_reg();
} else {
implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
void
fs_visitor::generate_discard_not(fs_inst *inst, struct brw_reg mask)
{
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
- brw_pop_insn_state(p);
+ if (intel->gen >= 6) {
+ /* Gen6 no longer has the mask reg for us to just read the
+ * active channels from. However, cmp updates just the channels
+ * of the flag reg that are enabled, so we can get at the
+ * channel enables that way. In this step, make a reg of ones
+ * we'll compare to.
+ */
+ brw_MOV(p, mask, brw_imm_ud(1));
+ } else {
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_NOT(p, mask, brw_mask_reg(1)); /* IMASK */
+ brw_pop_insn_state(p);
+ }
}
void
fs_visitor::generate_discard_and(fs_inst *inst, struct brw_reg mask)
{
- struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
- mask = brw_uw1_reg(mask.file, mask.nr, 0);
+ if (intel->gen >= 6) {
+ struct brw_reg f0 = brw_flag_reg();
+ struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
+
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_MOV(p, f0, brw_imm_uw(0xffff)); /* inactive channels undiscarded */
+ brw_pop_insn_state(p);
+
+ brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
+ BRW_CONDITIONAL_Z, mask, brw_imm_ud(0)); /* active channels fail test */
+      /* Undo CMP's whacking of predication */
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_AND(p, g1, f0, g1);
+ brw_pop_insn_state(p);
+ } else {
+ struct brw_reg g0 = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
- brw_push_insn_state(p);
- brw_set_mask_control(p, BRW_MASK_DISABLE);
- brw_AND(p, g0, mask, g0);
- brw_pop_insn_state(p);
+ mask = brw_uw1_reg(mask.file, mask.nr, 0);
+
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_AND(p, g0, mask, g0);
+ brw_pop_insn_state(p);
+ }
+}
+
+void
+fs_visitor::generate_spill(fs_inst *inst, struct brw_reg src)
+{
+ assert(inst->mlen != 0);
+
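+   /* m(base_mrf) carries the message header; stage the data to be
+    * written in m(base_mrf + 1) before issuing the scratch write.
+    */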
+ brw_MOV(p,
+ retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_UD),
+ retype(src, BRW_REGISTER_TYPE_UD));
+ brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf), 1,
+ inst->offset);
+}
+
+void
+fs_visitor::generate_unspill(fs_inst *inst, struct brw_reg dst)
+{
+ assert(inst->mlen != 0);
+
+ /* Clear any post destination dependencies that would be ignored by
+ * the block read. See the B-Spec for pre-gen5 send instruction.
+ *
+ * This could use a better solution, since texture sampling and
+ * math reads could potentially run into it as well -- anywhere
+ * that we have a SEND with a destination that is a register that
+ * was written but not read within the last N instructions (what's
+ * N? unsure). This is rare because of dead code elimination, but
+ * not impossible.
+ */
+ if (intel->gen == 4 && !intel->is_g4x)
+ brw_MOV(p, brw_null_reg(), dst);
+
+ brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf), 1,
+ inst->offset);
+
+ if (intel->gen == 4 && !intel->is_g4x) {
+ /* gen4 errata: destination from a send can't be used as a
+ * destination until it's been read. Just read it so we don't
+ * have to worry.
+ */
+ brw_MOV(p, brw_null_reg(), dst);
+ }
+}
+
+
+void
+fs_visitor::generate_pull_constant_load(fs_inst *inst, struct brw_reg dst)
+{
+ assert(inst->mlen != 0);
+
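+   /* Same message setup as generate_unspill(), except the read
+    * targets the constant buffer surface instead of scratch space.
+    */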
+ /* Clear any post destination dependencies that would be ignored by
+ * the block read. See the B-Spec for pre-gen5 send instruction.
+ *
+ * This could use a better solution, since texture sampling and
+ * math reads could potentially run into it as well -- anywhere
+ * that we have a SEND with a destination that is a register that
+ * was written but not read within the last N instructions (what's
+ * N? unsure). This is rare because of dead code elimination, but
+ * not impossible.
+ */
+ if (intel->gen == 4 && !intel->is_g4x)
+ brw_MOV(p, brw_null_reg(), dst);
+
+ brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
+ inst->offset, SURF_INDEX_FRAG_CONST_BUFFER);
+
+ if (intel->gen == 4 && !intel->is_g4x) {
+ /* gen4 errata: destination from a send can't be used as a
+ * destination until it's been read. Just read it so we don't
+ * have to worry.
+ */
+ brw_MOV(p, brw_null_reg(), dst);
+ }
}
void
constant_nr % 8);
inst->src[i].file = FIXED_HW_REG;
- inst->src[i].fixed_hw_reg = brw_reg;
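+	 /* Keep the new fixed hw reg typed the way the instruction
+	  * expects to read it.
+	  */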
+ inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
}
}
}
this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
}
-static void
-assign_reg(int *reg_hw_locations, fs_reg *reg)
-{
- if (reg->file == GRF && reg->reg != 0) {
- assert(reg->reg_offset >= 0);
- reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
- reg->reg = 0;
- }
-}
-
-void
-fs_visitor::assign_regs_trivial()
-{
- int last_grf = 0;
- int hw_reg_mapping[this->virtual_grf_next];
- int i;
-
- hw_reg_mapping[0] = 0;
- hw_reg_mapping[1] = this->first_non_payload_grf;
- for (i = 2; i < this->virtual_grf_next; i++) {
- hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
- this->virtual_grf_sizes[i - 1]);
- }
- last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];
-
- foreach_iter(exec_list_iterator, iter, this->instructions) {
- fs_inst *inst = (fs_inst *)iter.get();
-
- assign_reg(hw_reg_mapping, &inst->dst);
- assign_reg(hw_reg_mapping, &inst->src[0]);
- assign_reg(hw_reg_mapping, &inst->src[1]);
- }
-
- this->grf_used = last_grf + 1;
-}
-
-void
-fs_visitor::assign_regs()
-{
- int last_grf = 0;
- int hw_reg_mapping[this->virtual_grf_next + 1];
- int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf;
- int class_sizes[base_reg_count];
- int class_count = 0;
- int aligned_pair_class = -1;
-
- /* Set up the register classes.
- *
- * The base registers store a scalar value. For texture samples,
- * we get virtual GRFs composed of 4 contiguous hw register. For
- * structures and arrays, we store them as contiguous larger things
- * than that, though we should be able to do better most of the
- * time.
- */
- class_sizes[class_count++] = 1;
- if (brw->has_pln && intel->gen < 6) {
- /* Always set up the (unaligned) pairs for gen5, so we can find
- * them for making the aligned pair class.
- */
- class_sizes[class_count++] = 2;
- }
- for (int r = 1; r < this->virtual_grf_next; r++) {
- int i;
-
- for (i = 0; i < class_count; i++) {
- if (class_sizes[i] == this->virtual_grf_sizes[r])
- break;
- }
- if (i == class_count) {
- if (this->virtual_grf_sizes[r] >= base_reg_count) {
- fprintf(stderr, "Object too large to register allocate.\n");
- this->fail = true;
- }
-
- class_sizes[class_count++] = this->virtual_grf_sizes[r];
- }
- }
-
- int ra_reg_count = 0;
- int class_base_reg[class_count];
- int class_reg_count[class_count];
- int classes[class_count + 1];
-
- for (int i = 0; i < class_count; i++) {
- class_base_reg[i] = ra_reg_count;
- class_reg_count[i] = base_reg_count - (class_sizes[i] - 1);
- ra_reg_count += class_reg_count[i];
- }
-
- struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count);
- for (int i = 0; i < class_count; i++) {
- classes[i] = ra_alloc_reg_class(regs);
-
- for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
- ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r);
- }
-
- /* Add conflicts between our contiguous registers aliasing
- * base regs and other register classes' contiguous registers
- * that alias base regs, or the base regs themselves for classes[0].
- */
- for (int c = 0; c <= i; c++) {
- for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
- for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1));
- c_r < MIN2(class_reg_count[c], i_r + class_sizes[i]);
- c_r++) {
-
- if (0) {
- printf("%d/%d conflicts %d/%d\n",
- class_sizes[i], this->first_non_payload_grf + i_r,
- class_sizes[c], this->first_non_payload_grf + c_r);
- }
-
- ra_add_reg_conflict(regs,
- class_base_reg[i] + i_r,
- class_base_reg[c] + c_r);
- }
- }
- }
- }
-
- /* Add a special class for aligned pairs, which we'll put delta_x/y
- * in on gen5 so that we can do PLN.
- */
- if (brw->has_pln && intel->gen < 6) {
- int reg_count = (base_reg_count - 1) / 2;
- int unaligned_pair_class = 1;
- assert(class_sizes[unaligned_pair_class] == 2);
-
- aligned_pair_class = class_count;
- classes[aligned_pair_class] = ra_alloc_reg_class(regs);
- class_sizes[aligned_pair_class] = 2;
- class_base_reg[aligned_pair_class] = 0;
- class_reg_count[aligned_pair_class] = 0;
- int start = (this->first_non_payload_grf & 1) ? 1 : 0;
-
- for (int i = 0; i < reg_count; i++) {
- ra_class_add_reg(regs, classes[aligned_pair_class],
- class_base_reg[unaligned_pair_class] + i * 2 + start);
- }
- class_count++;
- }
-
- ra_set_finalize(regs);
-
- struct ra_graph *g = ra_alloc_interference_graph(regs,
- this->virtual_grf_next);
- /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
- * with nodes.
- */
- ra_set_node_class(g, 0, classes[0]);
-
- for (int i = 1; i < this->virtual_grf_next; i++) {
- for (int c = 0; c < class_count; c++) {
- if (class_sizes[c] == this->virtual_grf_sizes[i]) {
- if (aligned_pair_class >= 0 &&
- this->delta_x.reg == i) {
- ra_set_node_class(g, i, classes[aligned_pair_class]);
- } else {
- ra_set_node_class(g, i, classes[c]);
- }
- break;
- }
- }
-
- for (int j = 1; j < i; j++) {
- if (virtual_grf_interferes(i, j)) {
- ra_add_node_interference(g, i, j);
- }
- }
- }
-
- /* FINISHME: Handle spilling */
- if (!ra_allocate_no_spills(g)) {
- fprintf(stderr, "Failed to allocate registers.\n");
- this->fail = true;
- return;
- }
-
- /* Get the chosen virtual registers for each node, and map virtual
- * regs in the register classes back down to real hardware reg
- * numbers.
- */
- hw_reg_mapping[0] = 0; /* unused */
- for (int i = 1; i < this->virtual_grf_next; i++) {
- int reg = ra_get_node_reg(g, i);
- int hw_reg = -1;
-
- for (int c = 0; c < class_count; c++) {
- if (reg >= class_base_reg[c] &&
- reg < class_base_reg[c] + class_reg_count[c]) {
- hw_reg = reg - class_base_reg[c];
- break;
- }
- }
-
- assert(hw_reg >= 0);
- hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
- last_grf = MAX2(last_grf,
- hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
- }
-
- foreach_iter(exec_list_iterator, iter, this->instructions) {
- fs_inst *inst = (fs_inst *)iter.get();
-
- assign_reg(hw_reg_mapping, &inst->dst);
- assign_reg(hw_reg_mapping, &inst->src[0]);
- assign_reg(hw_reg_mapping, &inst->src[1]);
- }
-
- this->grf_used = last_grf + 1;
-
- talloc_free(g);
- talloc_free(regs);
-}
-
/**
* Split large virtual GRFs into separate components if we can.
*
}
}
+/**
+ * Choose accesses from the UNIFORM file to demote to using the pull
+ * constant buffer.
+ *
+ * We allow a fragment shader to use more uniform components than the
+ * spec's required minimum for the maximum (64).  With enough of
+ * them, they would fill up all of the register space, so this pass
+ * pushes some out to the pull constant buffer and updates the
+ * program to load them from there.
+ */
+void
+fs_visitor::setup_pull_constants()
+{
+ /* Only allow 16 registers (128 uniform components) as push constants. */
+ unsigned int max_uniform_components = 16 * 8;
+ if (c->prog_data.nr_params <= max_uniform_components)
+ return;
+
+ /* Just demote the end of the list. We could probably do better
+ * here, demoting things that are rarely used in the program first.
+ */
+ int pull_uniform_base = max_uniform_components;
+ int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;
+
+ foreach_iter(exec_list_iterator, iter, this->instructions) {
+ fs_inst *inst = (fs_inst *)iter.get();
+
+ for (int i = 0; i < 3; i++) {
+ if (inst->src[i].file != UNIFORM)
+ continue;
+
+ int uniform_nr = inst->src[i].hw_reg + inst->src[i].reg_offset;
+ if (uniform_nr < pull_uniform_base)
+ continue;
+
+ fs_reg dst = fs_reg(this, glsl_type::float_type);
+ fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
+ dst);
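+	 /* Read the 16-byte-aligned block holding this float; the
+	  * smear set below selects the component within that block.
+	  */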
+ pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
+ pull->ir = inst->ir;
+ pull->annotation = inst->annotation;
+ pull->base_mrf = 14;
+ pull->mlen = 1;
+
+ inst->insert_before(pull);
+
+ inst->src[i].file = GRF;
+ inst->src[i].reg = dst.reg;
+ inst->src[i].reg_offset = 0;
+ inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
+ }
+ }
+
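+   /* Move the demoted parameters over to the pull_param[] list used
+    * when filling the pull constant buffer.
+    */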
+ for (int i = 0; i < pull_uniform_count; i++) {
+ c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
+ c->prog_data.pull_param_convert[i] =
+ c->prog_data.param_convert[pull_uniform_base + i];
+ }
+ c->prog_data.nr_params -= pull_uniform_count;
+ c->prog_data.nr_pull_params = pull_uniform_count;
+}
+
void
fs_visitor::calculate_live_intervals()
{
int *use = talloc_array(mem_ctx, int, num_vars);
int loop_depth = 0;
int loop_start = 0;
+ int bb_header_ip = 0;
for (int i = 0; i < num_vars; i++) {
def[i] = 1 << 30;
loop_depth--;
if (loop_depth == 0) {
- /* FINISHME:
- *
- * Patches up any vars marked for use within the loop as
- * live until the end. This is conservative, as there
- * will often be variables defined and used inside the
- * loop but dead at the end of the loop body.
+	    /* Patch up the uses of vars marked as live across
+	     * the whole loop.
*/
for (int i = 0; i < num_vars; i++) {
if (use[i] == loop_start) {
}
}
} else {
- int eip = ip;
-
- if (loop_depth)
- eip = loop_start;
-
for (unsigned int i = 0; i < 3; i++) {
if (inst->src[i].file == GRF && inst->src[i].reg != 0) {
- use[inst->src[i].reg] = MAX2(use[inst->src[i].reg], eip);
+ int reg = inst->src[i].reg;
+
+ if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
+ def[reg] >= bb_header_ip)) {
+ use[reg] = ip;
+ } else {
+ def[reg] = MIN2(loop_start, def[reg]);
+ use[reg] = loop_start;
+
+ /* Nobody else is going to go smash our start to
+ * later in the loop now, because def[reg] now
+ * points before the bb header.
+ */
+ }
}
}
if (inst->dst.file == GRF && inst->dst.reg != 0) {
- def[inst->dst.reg] = MIN2(def[inst->dst.reg], eip);
+ int reg = inst->dst.reg;
+
+ if (!loop_depth || (this->virtual_grf_sizes[reg] == 1 &&
+ !inst->predicated)) {
+ def[reg] = MIN2(def[reg], ip);
+ } else {
+ def[reg] = MIN2(def[reg], loop_start);
+ }
}
}
ip++;
+
+ /* Set the basic block header IP. This is used for determining
+    * whether a complete def of a single-register virtual GRF in a
+    * loop dominates a use in the same basic block.  It's a quick way
+    * to reduce the live interval range of most registers used in a
+    * loop.
+ */
+ if (inst->opcode == BRW_OPCODE_IF ||
+ inst->opcode == BRW_OPCODE_ELSE ||
+ inst->opcode == BRW_OPCODE_ENDIF ||
+ inst->opcode == BRW_OPCODE_DO ||
+ inst->opcode == BRW_OPCODE_WHILE ||
+ inst->opcode == BRW_OPCODE_BREAK ||
+ inst->opcode == BRW_OPCODE_CONTINUE) {
+ bb_header_ip = ip;
+ }
}
talloc_free(this->virtual_grf_def);
scan_inst->src[i].reg_offset = inst->src[0].reg_offset;
scan_inst->src[i].abs |= inst->src[0].abs;
scan_inst->src[i].negate ^= inst->src[0].negate;
+ scan_inst->src[i].smear = inst->src[0].smear;
}
}
}
inst->predicated ||
inst->dst.file != MRF || inst->src[0].file != GRF ||
inst->dst.type != inst->src[0].type ||
- inst->src[0].abs || inst->src[0].negate)
+ inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
continue;
/* Can't compute-to-MRF this GRF if someone else was going to
case GRF:
case ARF:
case MRF:
- brw_reg = brw_vec8_reg(reg->file,
- reg->hw_reg, 0);
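+      /* smear == -1 means use the whole register; otherwise read
+       * just the one smeared component as a vec1.
+       */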
+ if (reg->smear == -1) {
+ brw_reg = brw_vec8_reg(reg->file,
+ reg->hw_reg, 0);
+ } else {
+ brw_reg = brw_vec1_reg(reg->file,
+ reg->hw_reg, reg->smear);
+ }
brw_reg = retype(brw_reg, reg->type);
break;
case IMM:
void
fs_visitor::generate_code()
{
- unsigned int annotation_len = 0;
int last_native_inst = 0;
struct brw_instruction *if_stack[16], *loop_stack[16];
int if_stack_depth = 0, loop_stack_depth = 0;
int if_depth_in_loop[16];
+ const char *last_annotation_string = NULL;
+ ir_instruction *last_annotation_ir = NULL;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ printf("Native code for fragment shader %d:\n",
+ ctx->Shader.CurrentFragmentProgram->Name);
+ }
if_depth_in_loop[loop_stack_depth] = 0;
fs_inst *inst = (fs_inst *)iter.get();
struct brw_reg src[3], dst;
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ if (last_annotation_ir != inst->ir) {
+ last_annotation_ir = inst->ir;
+ if (last_annotation_ir) {
+ printf(" ");
+ last_annotation_ir->print();
+ printf("\n");
+ }
+ }
+ if (last_annotation_string != inst->annotation) {
+ last_annotation_string = inst->annotation;
+ if (last_annotation_string)
+ printf(" %s\n", last_annotation_string);
+ }
+ }
+
for (unsigned int i = 0; i < 3; i++) {
src[i] = brw_reg_from_fs_reg(&inst->src[i]);
}
case BRW_OPCODE_IF:
assert(if_stack_depth < 16);
- if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
+ if (inst->src[0].file != BAD_FILE) {
+ assert(intel->gen >= 6);
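+	    /* A real src[0] means emit_if_gen6() folded the compare
+	     * into the IF itself.
+	     */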
+	    if_stack[if_stack_depth] = brw_IF_gen6(p, inst->conditional_mod,
+						   src[0], src[1]);
+ } else {
+ if_stack[if_stack_depth] = brw_IF(p, BRW_EXECUTE_8);
+ }
if_depth_in_loop[loop_stack_depth]++;
if_stack_depth++;
break;
+
case BRW_OPCODE_ELSE:
if_stack[if_stack_depth - 1] =
brw_ELSE(p, if_stack[if_stack_depth - 1]);
case FS_OPCODE_DDY:
generate_ddy(inst, dst, src[0]);
break;
+
+ case FS_OPCODE_SPILL:
+ generate_spill(inst, src[0]);
+ break;
+
+ case FS_OPCODE_UNSPILL:
+ generate_unspill(inst, dst);
+ break;
+
+ case FS_OPCODE_PULL_CONSTANT_LOAD:
+ generate_pull_constant_load(inst, dst);
+ break;
+
case FS_OPCODE_FB_WRITE:
generate_fb_write(inst);
break;
this->fail = true;
}
- if (annotation_len < p->nr_insn) {
- annotation_len *= 2;
- if (annotation_len < 16)
- annotation_len = 16;
-
- this->annotation_string = talloc_realloc(this->mem_ctx,
- annotation_string,
- const char *,
- annotation_len);
- this->annotation_ir = talloc_realloc(this->mem_ctx,
- annotation_ir,
- ir_instruction *,
- annotation_len);
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
+ if (0) {
+ printf("0x%08x 0x%08x 0x%08x 0x%08x ",
+ ((uint32_t *)&p->store[i])[3],
+ ((uint32_t *)&p->store[i])[2],
+ ((uint32_t *)&p->store[i])[1],
+ ((uint32_t *)&p->store[i])[0]);
+ }
+ brw_disasm(stdout, &p->store[i], intel->gen);
+ printf("\n");
+ }
}
- for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
- this->annotation_string[i] = inst->annotation;
- this->annotation_ir[i] = inst->ir;
- }
last_native_inst = p->nr_insn;
}
}
GLboolean
brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
- struct brw_compile *p = &c->func;
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
- struct gl_shader_program *prog = ctx->Shader.CurrentProgram;
+ struct gl_shader_program *prog = ctx->Shader.CurrentFragmentProgram;
if (!prog)
return GL_FALSE;
*/
c->dispatch_width = 8;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("GLSL IR for native fragment shader %d:\n", prog->Name);
_mesa_print_ir(shader->ir, NULL);
printf("\n");
v.emit_fb_writes();
v.split_virtual_grfs();
+ v.setup_pull_constants();
v.assign_curb_setup();
v.assign_urb_setup();
progress = v.dead_code_eliminate() || progress;
} while (progress);
+ if (0) {
+ /* Debug of register spilling: Go spill everything. */
+ int virtual_grf_count = v.virtual_grf_next;
+ for (int i = 1; i < virtual_grf_count; i++) {
+ v.spill_reg(i);
+ }
+ v.calculate_live_intervals();
+ }
+
if (0)
v.assign_regs_trivial();
- else
- v.assign_regs();
+ else {
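+      /* assign_regs() returns false when allocation fails; each
+       * failed attempt is assumed to spill something, so recompute
+       * liveness and retry until it succeeds or hits a hard failure.
+       */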
+ while (!v.assign_regs()) {
+ if (v.fail)
+ break;
+
+ v.calculate_live_intervals();
+ }
+ }
}
if (!v.fail)
if (v.fail)
return GL_FALSE;
- if (INTEL_DEBUG & DEBUG_WM) {
- const char *last_annotation_string = NULL;
- ir_instruction *last_annotation_ir = NULL;
-
- printf("Native code for fragment shader %d:\n", prog->Name);
- for (unsigned int i = 0; i < p->nr_insn; i++) {
- if (last_annotation_ir != v.annotation_ir[i]) {
- last_annotation_ir = v.annotation_ir[i];
- if (last_annotation_ir) {
- printf(" ");
- last_annotation_ir->print();
- printf("\n");
- }
- }
- if (last_annotation_string != v.annotation_string[i]) {
- last_annotation_string = v.annotation_string[i];
- if (last_annotation_string)
- printf(" %s\n", last_annotation_string);
- }
- if (0) {
- printf("0x%08x 0x%08x 0x%08x 0x%08x ",
- ((uint32_t *)&p->store[i])[3],
- ((uint32_t *)&p->store[i])[2],
- ((uint32_t *)&p->store[i])[1],
- ((uint32_t *)&p->store[i])[0]);
- }
- brw_disasm(stdout, &p->store[i], intel->gen);
- }
- printf("\n");
- }
-
c->prog_data.total_grf = v.grf_used;
- c->prog_data.total_scratch = 0;
return GL_TRUE;
}