this->shadow_compare = false;
this->ir = NULL;
this->urb_write_flags = BRW_URB_WRITE_NO_FLAGS;
- this->header_present = false;
+ this->header_size = 0;
this->flag_subreg = 0;
this->mlen = 0;
this->base_mrf = 0;
}
}
-
-static int
-type_size(const struct glsl_type *type)
+/**
+ * Returns the minimum number of vec4 elements needed to pack a type.
+ *
+ * For simple types, it will return 1 (a single vec4); for matrices, the
+ * number of columns; for array and struct, the sum of the vec4_size of
+ * each of its elements; and for sampler and atomic, zero.
+ *
+ * This method is useful to calculate how much register space is needed to
+ * store a particular type.
+ */
+int
+vec4_visitor::type_size(const struct glsl_type *type)
{
unsigned int i;
int size;
size += type_size(type->fields.structure[i].type);
}
return size;
+ case GLSL_TYPE_SUBROUTINE:
+ return 1;
+
case GLSL_TYPE_SAMPLER:
/* Samplers take up no register space, since they're baked in at
* link time.
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type));
+ this->reg = v->alloc.allocate(v->type_size(type));
if (type->is_array() || type->is_record()) {
this->swizzle = BRW_SWIZZLE_NOOP;
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type) * size);
+ this->reg = v->alloc.allocate(v->type_size(type) * size);
this->swizzle = BRW_SWIZZLE_NOOP;
init();
this->file = GRF;
- this->reg = v->alloc.allocate(type_size(type));
+ this->reg = v->alloc.allocate(v->type_size(type));
if (type->is_array() || type->is_record()) {
this->writemask = WRITEMASK_XYZW;
this->type = brw_type_for_base_type(type);
}
+/**
+ * Appends one vec4 worth of uniform storage pointers to
+ * stage_prog_data->param.
+ *
+ * The first n slots point at the caller's constant values; the remaining
+ * slots up to 4 point at a shared zero constant so unread components are
+ * well-defined.  Records the vector width in uniform_vector_size and
+ * advances the running uniform count.
+ *
+ * NOTE(review): assumes n <= 4 (one vec4) — confirm callers never pass more.
+ */
+void
+vec4_visitor::setup_vector_uniform_values(const gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
+
+ /* Point the first n param slots at the provided constants. */
+ for (unsigned i = 0; i < n; ++i)
+ stage_prog_data->param[4 * uniforms + i] = &values[i];
+
+ /* Pad the rest of the vec4 with a shared zero constant. */
+ for (unsigned i = n; i < 4; ++i)
+ stage_prog_data->param[4 * uniforms + i] = &zero;
+
+ uniform_vector_size[uniforms++] = n;
+}
+
/* Our support for uniforms is piggy-backed on the struct
* gl_fragment_program, because that's where the values actually
* get stored, rather than in some global gl_shader_program uniform
* order we'd walk the type, so walk the list of storage and find anything
* with our name, or the prefix of a component that starts with our name.
*/
- for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
+ for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
+ if (storage->builtin)
+ continue;
+
if (strncmp(ir->name, storage->name, namelen) != 0 ||
(storage->name[namelen] != 0 &&
storage->name[namelen] != '.' &&
continue;
}
- gl_constant_value *components = storage->storage;
- unsigned vector_count = (MAX2(storage->array_elements, 1) *
- storage->type->matrix_columns);
-
- for (unsigned s = 0; s < vector_count; s++) {
- assert(uniforms < uniform_array_size);
- uniform_vector_size[uniforms] = storage->type->vector_elements;
-
- int i;
- for (i = 0; i < uniform_vector_size[uniforms]; i++) {
- stage_prog_data->param[uniforms * 4 + i] = components;
- components++;
- }
- for (; i < 4; i++) {
- static gl_constant_value zero = { 0.0 };
- stage_prog_data->param[uniforms * 4 + i] = &zero;
- }
+ const unsigned vector_count = (MAX2(storage->array_elements, 1) *
+ storage->type->matrix_columns);
+ const unsigned vector_size = storage->type->vector_elements;
- uniforms++;
- }
+ for (unsigned s = 0; s < vector_count; s++)
+ setup_vector_uniform_values(&storage->storage[s * vector_size],
+ vector_size);
}
}
void
-vec4_visitor::setup_uniform_clipplane_values()
+vec4_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
- gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
-
for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
assert(this->uniforms < uniform_array_size);
this->uniform_vector_size[this->uniforms] = 4;
for (int i = 0; i < type_size(ir->type); i++) {
output_reg[ir->data.location + i] = *reg;
output_reg[ir->data.location + i].reg_offset = i;
- output_reg[ir->data.location + i].type =
- brw_type_for_base_type(ir->type->get_scalar_type());
output_reg_annotation[ir->data.location + i] = ir->name;
}
break;
* Some uniforms, such as samplers and atomic counters, have no actual
* storage, so we should ignore them.
*/
- if (ir->is_in_uniform_block() || type_size(ir->type) == 0)
+ if (ir->is_in_buffer_block() || type_size(ir->type) == 0)
return;
/* Track how big the whole uniform variable is, in case we need to put a
break;
case ir_var_system_value:
- reg = make_reg_for_system_value(ir);
+ reg = make_reg_for_system_value(ir->data.location, ir->type);
break;
default:
surf_index,
header);
pull->mlen = 2;
- pull->header_present = true;
+ pull->header_size = 1;
} else if (devinfo->gen >= 7) {
dst_reg grf_offset = dst_reg(this, glsl_type::int_type);
emit(pull);
}
+/**
+ * Returns a copy of \p src whose value is uniform across all channels.
+ *
+ * Emits FIND_LIVE_CHANNEL to pick the index of any live channel, then
+ * BROADCAST to replicate src's value from that channel into every channel
+ * of the result.  Both instructions use force_writemask_all so they
+ * execute regardless of the current predication/execution mask.
+ */
+src_reg
+vec4_visitor::emit_uniformize(const src_reg &src)
+{
+ const src_reg chan_index(this, glsl_type::uint_type);
+ const dst_reg dst = retype(dst_reg(this, glsl_type::uint_type),
+ src.type);
+
+ emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, dst_reg(chan_index))
+ ->force_writemask_all = true;
+ emit(SHADER_OPCODE_BROADCAST, dst, src, chan_index)
+ ->force_writemask_all = true;
+
+ return src_reg(dst);
+}
+
void
vec4_visitor::visit(ir_expression *ir)
{
case ir_unop_noise:
unreachable("not reached: should be handled by lower_noise");
+ case ir_unop_subroutine_to_int:
+ emit(MOV(result_dst, op[0]));
+ break;
+
case ir_binop_add:
emit(ADD(result_dst, op[0], op[1]));
break;
assert(ir->type->is_integer());
emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
break;
- case ir_binop_carry: {
- struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
- emit(ADDC(dst_null_ud(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
- break;
- }
- case ir_binop_borrow: {
- struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);
+ case ir_binop_carry:
+ unreachable("Should have been lowered by carry_to_arith().");
+
+ case ir_binop_borrow:
+ unreachable("Should have been lowered by borrow_to_arith().");
- emit(SUBB(dst_null_ud(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
- break;
- }
case ir_binop_mod:
/* Floating point should be lowered by MOD_TO_FLOOR in the compiler. */
assert(ir->type->is_integer());
emit(MOV(result_dst, op[0]));
break;
case ir_unop_b2i:
- emit(AND(result_dst, op[0], src_reg(1)));
- break;
case ir_unop_b2f:
if (devinfo->gen <= 5) {
resolve_bool_comparison(ir->operands[0], &op[0]);
}
- op[0].type = BRW_REGISTER_TYPE_D;
- result_dst.type = BRW_REGISTER_TYPE_D;
- emit(AND(result_dst, op[0], src_reg(0x3f800000u)));
- result_dst.type = BRW_REGISTER_TYPE_F;
+ emit(MOV(result_dst, negate(op[0])));
break;
case ir_unop_f2b:
emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
const_uniform_block->value.u[0]);
} else {
/* The block index is not a constant. Evaluate the index expression
- * per-channel and add the base UBO index; the generator will select
- * a value from any live channel.
+ * per-channel and add the base UBO index; we have to select a value
+ * from any live channel.
*/
surf_index = src_reg(this, glsl_type::uint_type);
emit(ADD(dst_reg(surf_index), op[0],
src_reg(prog_data->base.binding_table.ubo_start)));
+ surf_index = emit_uniformize(surf_index);
/* Assume this may touch any UBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
}
} else {
offset = src_reg(this, glsl_type::uint_type);
- emit(SHR(dst_reg(offset), op[1], src_reg(4)));
+ emit(SHR(dst_reg(offset), op[1], src_reg(4u)));
}
emit_pull_constant_load_reg(dst_reg(packed_consts),
emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dst, offset,
src_reg(), src_reg());
}
+
+ brw_mark_surface_used(stage_prog_data, surf_index);
}
void
new(mem_ctx) vec4_instruction(SHADER_OPCODE_TXF_MCS,
dst_reg(this, glsl_type::uvec4_type));
inst->base_mrf = 2;
- inst->mlen = 1;
inst->src[1] = sampler;
+ int param_base;
+
+ if (devinfo->gen >= 9) {
+ /* Gen9+ needs a message header in order to use SIMD4x2 mode */
+ vec4_instruction *header_inst = new(mem_ctx)
+ vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
+ dst_reg(MRF, inst->base_mrf));
+
+ emit(header_inst);
+
+ inst->mlen = 2;
+ inst->header_size = 1;
+ param_base = inst->base_mrf + 1;
+ } else {
+ inst->mlen = 1;
+ param_base = inst->base_mrf;
+ }
+
/* parameters are: u, v, r, lod; lod will always be zero due to api restrictions */
- int param_base = inst->base_mrf;
int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
int zero_mask = 0xf & ~coord_mask;
/* Emit code to evaluate the actual indexing expression */
nonconst_sampler_index->accept(this);
- dst_reg temp(this, glsl_type::uint_type);
- emit(ADD(temp, this->result, src_reg(sampler)))
- ->force_writemask_all = true;
- sampler_reg = src_reg(temp);
+ src_reg temp(this, glsl_type::uint_type);
+ emit(ADD(dst_reg(temp), this->result, src_reg(sampler)));
+ sampler_reg = emit_uniformize(temp);
} else {
/* Single sampler, or constant array index; the indexing expression
* is just an immediate.
* - Gather channel selection
* - Sampler indices too large to fit in a 4-bit value.
*/
- inst->header_present =
- devinfo->gen < 5 || devinfo->gen >= 9 ||
- inst->offset != 0 || ir->op == ir_tg4 ||
- is_high_sampler(devinfo, sampler_reg);
+ inst->header_size =
+ (devinfo->gen < 5 || devinfo->gen >= 9 ||
+ inst->offset != 0 || ir->op == ir_tg4 ||
+ is_high_sampler(devinfo, sampler_reg)) ? 1 : 0;
inst->base_mrf = 2;
- inst->mlen = inst->header_present + 1; /* always at least one */
+ inst->mlen = inst->header_size + 1; /* always at least one */
inst->dst.writemask = WRITEMASK_XYZW;
inst->shadow_compare = ir->shadow_comparitor != NULL;
inst->src[1] = sampler_reg;
/* MRF for the first parameter */
- int param_base = inst->base_mrf + inst->header_present;
+ int param_base = inst->base_mrf + inst->header_size;
if (ir->op == ir_txs || ir->op == ir_query_levels) {
int writemask = devinfo->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
unreachable("not reached");
}
+/* Barrier IR nodes are never expected to reach the vec4 visitor; abort
+ * loudly if one does. */
+void
+vec4_visitor::visit(ir_barrier *)
+{
+ unreachable("not reached");
+}
+
void
vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
dst_reg dst, src_reg offset,
*/
vec4_instruction *inst = emit(SHADER_OPCODE_UNTYPED_ATOMIC, dst,
brw_message_reg(0),
- src_reg(atomic_op), src_reg(surf_index));
+ src_reg(surf_index), src_reg(atomic_op));
inst->mlen = mlen;
}
vec4_instruction *inst;
inst = emit(OR(header1_w, src_reg(header1_w), src_reg(1u << 6)));
inst->predicate = BRW_PREDICATE_NORMAL;
+ output_reg[BRW_VARYING_SLOT_NDC].type = BRW_REGISTER_TYPE_F;
inst = emit(MOV(output_reg[BRW_VARYING_SLOT_NDC], src_reg(0.0f)));
inst->predicate = BRW_PREDICATE_NORMAL;
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
dst_reg reg_w = reg;
reg_w.writemask = WRITEMASK_W;
- emit(MOV(reg_w, src_reg(output_reg[VARYING_SLOT_PSIZ])));
+ src_reg reg_as_src = src_reg(output_reg[VARYING_SLOT_PSIZ]);
+ reg_as_src.type = reg_w.type;
+ reg_as_src.swizzle = brw_swizzle_for_size(1);
+ emit(MOV(reg_w, reg_as_src));
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_LAYER) {
dst_reg reg_y = reg;
reg_y.writemask = WRITEMASK_Y;
reg_y.type = BRW_REGISTER_TYPE_D;
+ output_reg[VARYING_SLOT_LAYER].type = reg_y.type;
emit(MOV(reg_y, src_reg(output_reg[VARYING_SLOT_LAYER])));
}
if (prog_data->vue_map.slots_valid & VARYING_BIT_VIEWPORT) {
dst_reg reg_z = reg;
reg_z.writemask = WRITEMASK_Z;
reg_z.type = BRW_REGISTER_TYPE_D;
+ output_reg[VARYING_SLOT_VIEWPORT].type = reg_z.type;
emit(MOV(reg_z, src_reg(output_reg[VARYING_SLOT_VIEWPORT])));
}
}
vec4_instruction *
vec4_visitor::emit_generic_urb_slot(dst_reg reg, int varying)
{
- assert (varying < VARYING_SLOT_MAX);
- reg.type = output_reg[varying].type;
+ assert(varying < VARYING_SLOT_MAX);
+ assert(output_reg[varying].type == reg.type);
current_annotation = output_reg_annotation[varying];
/* Copy the register, saturating if necessary */
return emit(MOV(reg, src_reg(output_reg[varying])));
vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
{
reg.type = BRW_REGISTER_TYPE_F;
+ output_reg[varying].type = reg.type;
switch (varying) {
case VARYING_SLOT_PSIZ:
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
if (inst->dst.file == GRF && inst->dst.reladdr) {
if (scratch_loc[inst->dst.reg] == -1) {
- scratch_loc[inst->dst.reg] = c->last_scratch;
- c->last_scratch += this->alloc.sizes[inst->dst.reg];
+ scratch_loc[inst->dst.reg] = last_scratch;
+ last_scratch += this->alloc.sizes[inst->dst.reg];
}
for (src_reg *iter = inst->dst.reladdr;
iter->reladdr;
iter = iter->reladdr) {
if (iter->file == GRF && scratch_loc[iter->reg] == -1) {
- scratch_loc[iter->reg] = c->last_scratch;
- c->last_scratch += this->alloc.sizes[iter->reg];
+ scratch_loc[iter->reg] = last_scratch;
+ last_scratch += this->alloc.sizes[iter->reg];
}
}
}
iter->reladdr;
iter = iter->reladdr) {
if (iter->file == GRF && scratch_loc[iter->reg] == -1) {
- scratch_loc[iter->reg] = c->last_scratch;
- c->last_scratch += this->alloc.sizes[iter->reg];
+ scratch_loc[iter->reg] = last_scratch;
+ last_scratch += this->alloc.sizes[iter->reg];
}
}
}
*reg = neg_result;
}
-vec4_visitor::vec4_visitor(struct brw_context *brw,
- struct brw_vec4_compile *c,
+vec4_visitor::vec4_visitor(const struct brw_compiler *compiler,
+ void *log_data,
struct gl_program *prog,
const struct brw_vue_prog_key *key,
struct brw_vue_prog_data *prog_data,
gl_shader_stage stage,
void *mem_ctx,
bool no_spills,
- shader_time_shader_type st_base,
- shader_time_shader_type st_written,
- shader_time_shader_type st_reset)
- : backend_visitor(brw, shader_prog, prog, &prog_data->base, stage),
- c(c),
+ int shader_time_index)
+ : backend_shader(compiler, log_data, mem_ctx,
+ shader_prog, prog, &prog_data->base, stage),
key(key),
prog_data(prog_data),
sanity_param_count(0),
first_non_payload_grf(0),
need_all_constants_in_pull_buffer(false),
no_spills(no_spills),
- st_base(st_base),
- st_written(st_written),
- st_reset(st_reset)
+ shader_time_index(shader_time_index),
+ last_scratch(0)
{
- this->mem_ctx = mem_ctx;
this->failed = false;
this->base_ir = NULL;