return src_reg(expanded);
}
-void
+vec4_instruction *
vec4_visitor::emit_math(enum opcode opcode,
const dst_reg &dst,
const src_reg &src0, const src_reg &src1)
/* MATH on Gen6 must be align1, so we can't do writemasks. */
math->dst = dst_reg(this, glsl_type::vec4_type);
math->dst.type = dst.type;
- emit(MOV(dst, src_reg(math->dst)));
+ math = emit(MOV(dst, src_reg(math->dst)));
} else if (devinfo->gen < 6) {
math->base_mrf = 1;
math->mlen = src1.file == BAD_FILE ? 1 : 2;
}
+
+ return math;
}
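/* Since emit_math() now returns the generated vec4_instruction, a caller
 * can adjust flags on the result directly.  A hypothetical sketch (the
 * surrounding registers are assumed):
 *
 *    vec4_instruction *math = emit_math(SHADER_OPCODE_SQRT, dst, src,
 *                                       src_reg());
 *    math->saturate = true;
 */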
void
}
}
-
-static int
-type_size(const struct glsl_type *type)
+/**
+ * Returns the minimum number of vec4 elements needed to pack a type.
+ *
+ * For simple types, it will return 1 (a single vec4); for matrices, the
+ * number of columns; for arrays and structs, the sum of the vec4 sizes of
+ * their elements; and for samplers and atomics, zero.
+ *
+ * This method is useful for calculating how much register space is needed
+ * to store a particular type.
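+ *
+ * For example, a mat3 needs 3 vec4 slots (one per column) and a float[4]
+ * array needs 4 (one per element).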
+ */
+int
+vec4_visitor::type_size(const struct glsl_type *type)
{
unsigned int i;
int size;
size += type_size(type->fields.structure[i].type);
}
return size;
+ case GLSL_TYPE_SUBROUTINE:
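+      /* A subroutine uniform is just an integer index, so a single slot
+       * suffices.
+       */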
+ return 1;
+
case GLSL_TYPE_SAMPLER:
/* Samplers take up no register space, since they're baked in at
* link time.
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type));
+ this->reg = v->alloc.allocate(v->type_size(type));
if (type->is_array() || type->is_record()) {
this->swizzle = BRW_SWIZZLE_NOOP;
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type) * size);
+ this->reg = v->alloc.allocate(v->type_size(type) * size);
this->swizzle = BRW_SWIZZLE_NOOP;
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type));
+ this->reg = v->alloc.allocate(v->type_size(type));
if (type->is_array() || type->is_record()) {
this->writemask = WRITEMASK_XYZW;
this->type = brw_type_for_base_type(type);
}
+void
+vec4_visitor::setup_vector_uniform_values(const gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
+
+ for (unsigned i = 0; i < n; ++i)
+ stage_prog_data->param[4 * uniforms + i] = &values[i];
+
+ for (unsigned i = n; i < 4; ++i)
+ stage_prog_data->param[4 * uniforms + i] = &zero;
+
+ uniform_vector_size[uniforms++] = n;
+}
+
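/* A worked example of the helper above: for a vec3 uniform (n == 3),
 * param[4 * u + 0..2] point at the three components, param[4 * u + 3]
 * points at the shared zero constant, and uniform_vector_size[u] is 3.
 */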
/* Our support for uniforms is piggy-backed on the struct
* gl_fragment_program, because that's where the values actually
* get stored, rather than in some global gl_shader_program uniform
continue;
}
- gl_constant_value *components = storage->storage;
- unsigned vector_count = (MAX2(storage->array_elements, 1) *
- storage->type->matrix_columns);
-
- for (unsigned s = 0; s < vector_count; s++) {
- assert(uniforms < uniform_array_size);
- uniform_vector_size[uniforms] = storage->type->vector_elements;
-
- int i;
- for (i = 0; i < uniform_vector_size[uniforms]; i++) {
- stage_prog_data->param[uniforms * 4 + i] = components;
- components++;
- }
- for (; i < 4; i++) {
- static gl_constant_value zero = { 0.0 };
- stage_prog_data->param[uniforms * 4 + i] = &zero;
- }
+ const unsigned vector_count = (MAX2(storage->array_elements, 1) *
+ storage->type->matrix_columns);
+ const unsigned vector_size = storage->type->vector_elements;
- uniforms++;
- }
+ for (unsigned s = 0; s < vector_count; s++)
+ setup_vector_uniform_values(&storage->storage[s * vector_size],
+ vector_size);
}
}
for (int i = 0; i < type_size(ir->type); i++) {
output_reg[ir->data.location + i] = *reg;
output_reg[ir->data.location + i].reg_offset = i;
- output_reg[ir->data.location + i].type =
- brw_type_for_base_type(ir->type->get_scalar_type());
output_reg_annotation[ir->data.location + i] = ir->name;
}
break;
* Some uniforms, such as samplers and atomic counters, have no actual
* storage, so we should ignore them.
*/
- if (ir->is_in_uniform_block() || type_size(ir->type) == 0)
+ if (ir->is_in_buffer_block() || type_size(ir->type) == 0)
return;
/* Track how big the whole uniform variable is, in case we need to put a
break;
case ir_var_system_value:
- reg = make_reg_for_system_value(ir);
+ reg = make_reg_for_system_value(ir->data.location, ir->type);
break;
default:
return true;
}
-void
+vec4_instruction *
vec4_visitor::emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
src_reg src0, src_reg src1)
{
inst = emit(BRW_OPCODE_SEL, dst, src0, src1);
inst->predicate = BRW_PREDICATE_NORMAL;
}
+
+ return inst;
}
-void
+vec4_instruction *
vec4_visitor::emit_lrp(const dst_reg &dst,
const src_reg &x, const src_reg &y, const src_reg &a)
{
/* Note that the instruction's argument order is reversed from GLSL
* and the IR.
*/
- emit(LRP(dst,
- fix_3src_operand(a), fix_3src_operand(y), fix_3src_operand(x)));
+ return emit(LRP(dst, fix_3src_operand(a), fix_3src_operand(y),
+ fix_3src_operand(x)));
} else {
/* Earlier generations don't support three source operations, so we
* need to emit x*(1-a) + y*a.
emit(MUL(y_times_a, y, a));
emit(ADD(one_minus_a, negate(a), src_reg(1.0f)));
emit(MUL(x_times_one_minus_a, x, src_reg(one_minus_a)));
- emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)));
+ return emit(ADD(dst, src_reg(x_times_one_minus_a), src_reg(y_times_a)));
}
}
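/* Both paths compute GLSL's mix(x, y, a) = x * (1 - a) + y * a.  The Gen6+
 * LRP instruction evaluates src0 * src1 + (1 - src0) * src2, so passing
 * (a, y, x) gives a * y + (1 - a) * x, which is the same expression.
 */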
emit(pull);
}
-void
-vec4_visitor::emit_uniformize(const dst_reg &dst, const src_reg &src)
+src_reg
+vec4_visitor::emit_uniformize(const src_reg &src)
{
const src_reg chan_index(this, glsl_type::uint_type);
+ const dst_reg dst = retype(dst_reg(this, glsl_type::uint_type),
+ src.type);
emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index))
->force_writemask_all = true;
emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index)
->force_writemask_all = true;
+
+ return src_reg(dst);
}
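/* emit_uniformize() now returns the dynamically uniform value rather than
 * writing through a caller-provided dst: FIND_LIVE_CHANNEL selects the
 * index of an enabled channel and BROADCAST replicates that channel of src
 * to all channels.  Call sites reduce to, e.g.:
 *
 *    surf_index = emit_uniformize(surf_index);
 */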
void
case ir_unop_noise:
unreachable("not reached: should be handled by lower_noise");
+ case ir_unop_subroutine_to_int:
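+      /* Subroutine handles are already plain integers, so the conversion
+       * is just a move.
+       */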
+ emit(MOV(result_dst, op[0]));
+ break;
+
case ir_binop_add:
emit(ADD(result_dst, op[0], op[1]));
break;
assert(ir->type->is_integer());
emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
break;
- case ir_binop_carry: {
- struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
- emit(ADDC(dst_null_ud(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
- break;
- }
- case ir_binop_borrow: {
- struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
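+   /* lower_instructions now rewrites these with CARRY_TO_ARITH and
+    * BORROW_TO_ARITH, roughly carry(x, y) = (x + y) < x and
+    * borrow(x, y) = x < y, so the backend never sees them.
+    */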
+ case ir_binop_carry:
+ unreachable("Should have been lowered by carry_to_arith().");
+
+ case ir_binop_borrow:
+ unreachable("Should have been lowered by borrow_to_arith().");
- emit(SUBB(dst_null_ud(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
- break;
- }
case ir_binop_mod:
/* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */
assert(ir->type->is_integer());
emit(MOV(result_dst, op[0]));
break;
case ir_unop_b2i:
- emit(AND(result_dst, op[0], src_reg(1)));
- break;
case ir_unop_b2f:
if (devinfo->gen <= 5) {
resolve_bool_comparison(ir->operands[0], &op[0]);
}
- op[0].type = BRW_REGISTER_TYPE_D;
- result_dst.type = BRW_REGISTER_TYPE_D;
- emit(AND(result_dst, op[0], src_reg(0x3f800000u)));
- result_dst.type = BRW_REGISTER_TYPE_F;
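+      /* Booleans are stored as 0/~0 (i.e. 0/-1), so negating the operand
+       * and letting the MOV convert to the destination type produces the
+       * expected 0/1 or 0.0f/1.0f.
+       */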
+ emit(MOV(result_dst, negate(op[0])));
break;
case ir_unop_f2b:
emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
surf_index = src_reg(this, glsl_type::uint_type);
emit(ADD(dst_reg(surf_index), op[0],
src_reg(prog_data->base.binding_table.ubo_start)));
- emit_uniformize(dst_reg(surf_index), surf_index);
+ surf_index = emit_uniformize(surf_index);
/* Assume this may touch any UBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset,
src_reg(), src_reg());
}
+
+ brw_mark_surface_used(stage_prog_data, surf_index);
}
void
/* Emit code to evaluate the actual indexing expression */
nonconst_sampler_index->accept(this);
- dst_reg temp(this, glsl_type::uint_type);
- emit(ADD(temp, this->result, src_reg(sampler)));
- emit_uniformize(temp, src_reg(temp));
-
- sampler_reg = src_reg(temp);
+ src_reg temp(this, glsl_type::uint_type);
+ emit(ADD(dst_reg(temp), this->result, src_reg(sampler)));
+ sampler_reg = emit_uniformize(temp);
} else {
/* Single sampler, or constant array index; the indexing expression
* is just an immediate.
vec4_instruction *inst;
inst = emit(OR(header1_w, src_reg(header1_w), src_reg(1u << 6)));
inst->predicate = BRW_PREDICATE_NORMAL;
+ output_reg[BRW_VARYING_SLOT_NDC].type = BRW_REGISTER_TYPE_F;
inst = emit(MOV(output_reg[BRW_VARYING_SLOT_NDC], src_reg(0.0f)));
inst->predicate = BRW_PREDICATE_NORMAL;
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
dst_reg reg_w = reg;
reg_w.writemask = WRITEMASK_W;
- emit(MOV(reg_w, src_reg(output_reg[VARYING_SLOT_PSIZ])));
+ src_reg reg_as_src = src_reg(output_reg[VARYING_SLOT_PSIZ]);
+ reg_as_src.type = reg_w.type;
+ reg_as_src.swizzle = brw_swizzle_for_size(1);
+ emit(MOV(reg_w, reg_as_src));
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_LAYER) {
dst_reg reg_y = reg;
reg_y.writemask = WRITEMASK_Y;
reg_y.type = BRW_REGISTER_TYPE_D;
+ output_reg[VARYING_SLOT_LAYER].type = reg_y.type;
emit(MOV(reg_y, src_reg(output_reg[VARYING_SLOT_LAYER])));
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_VIEWPORT) {
dst_reg reg_z = reg;
reg_z.writemask = WRITEMASK_Z;
reg_z.type = BRW_REGISTER_TYPE_D;
+ output_reg[VARYING_SLOT_VIEWPORT].type = reg_z.type;
emit(MOV(reg_z, src_reg(output_reg[VARYING_SLOT_VIEWPORT])));
}
}
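/* The explicit output_reg[...].type assignments above keep those sources in
 * sync with their destinations, now that emit_generic_urb_slot() asserts the
 * types match instead of silently retyping.
 */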
vec4_instruction *
vec4_visitor::emit_generic_urb_slot(dst_reg reg, int varying)
{
- assert (varying < VARYING_SLOT_MAX);
- reg.type = output_reg[varying].type;
+ assert(varying < VARYING_SLOT_MAX);
+ assert(output_reg[varying].type == reg.type);
current_annotation = output_reg_annotation[varying];
/* Copy the register, saturating if necessary */
return emit(MOV(reg, src_reg(output_reg[varying])));
vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
{
reg.type = BRW_REGISTER_TYPE_F;
+ output_reg[varying].type = reg.type;
switch (varying) {
case VARYING_SLOT_PSIZ: