src0, src1); \
}
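+/* Convenience emitter for three-source instructions, mirroring the ALU1/ALU2
+ * macros above.
+ */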
+#define ALU3(op) \
+ vec4_instruction * \
+ vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1, src_reg src2)\
+ { \
+ return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \
+ src0, src1, src2); \
+ }
+
ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU2(SHL)
ALU2(SHR)
ALU2(ASR)
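+/* Emitters for the three-source (LRP, BFE, BFI2) and bit-manipulation
+ * opcodes used by the expression visitor below.
+ */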
+ALU3(LRP)
+ALU1(BFREV)
+ALU3(BFE)
+ALU2(BFI1)
+ALU3(BFI2)
+ALU1(FBH)
+ALU1(FBL)
+ALU1(CBIT)
/** Gen4 predicated IF. */
vec4_instruction *
emit(dot_opcodes[elements - 2], dst, src0, src1);
}
+src_reg
+vec4_visitor::fix_3src_operand(src_reg src)
+{
+ /* Using vec4 uniforms in SIMD4x2 programs is difficult. You'd like to be
+ * able to use vertical stride of zero to replicate the vec4 uniform, like
+ *
+ * g3<0;4,1>:f - [0, 4][1, 5][2, 6][3, 7]
+ *
+ * But you can't, since vertical stride is always four in three-source
+ * instructions. Instead, insert a MOV instruction to do the replication so
+ * that the three-source instruction can consume it.
+ */
+
+ /* The MOV is only needed if the source is a uniform or immediate. */
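+   /* (Immediates can't be used directly as three-source operands either, so
+    * they also get moved into a temporary register first.)
+    */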
+ if (src.file != UNIFORM && src.file != IMM)
+ return src;
+
+ dst_reg expanded = dst_reg(this, glsl_type::vec4_type);
+ expanded.type = src.type;
+ emit(MOV(expanded, src));
+ return src_reg(expanded);
+}
+
src_reg
vec4_visitor::fix_math_operand(src_reg src)
{
int i;
for (i = 0; i < uniform_vector_size[uniforms]; i++) {
- prog_data->base.param[uniforms * 4 + i] = &components->f;
+ prog_data->param[uniforms * 4 + i] = &components->f;
components++;
}
for (; i < 4; i++) {
static float zero = 0;
- prog_data->base.param[uniforms * 4 + i] = &zero;
+ prog_data->param[uniforms * 4 + i] = &zero;
}
uniforms++;
*/
int compacted_clipplane_index = 0;
for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
- if (!(c->key.base.userclip_planes_enabled_gen_4_5 & (1 << i)))
+ if (!(key->userclip_planes_enabled_gen_4_5 & (1 << i)))
continue;
this->uniform_vector_size[this->uniforms] = 4;
this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms);
this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F;
for (int j = 0; j < 4; ++j) {
- prog_data->base.param[this->uniforms * 4 + j] = &clip_planes[i][j];
+ prog_data->param[this->uniforms * 4 + j] = &clip_planes[i][j];
}
++compacted_clipplane_index;
++this->uniforms;
/* In Gen6 and later, we don't compact clip planes, because this
* simplifies the implementation of gl_ClipDistance.
*/
- for (int i = 0; i < c->key.base.nr_userclip_plane_consts; ++i) {
+ for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
this->uniform_vector_size[this->uniforms] = 4;
this->userplane[i] = dst_reg(UNIFORM, this->uniforms);
this->userplane[i].type = BRW_REGISTER_TYPE_F;
for (int j = 0; j < 4; ++j) {
- prog_data->base.param[this->uniforms * 4 + j] = &clip_planes[i][j];
+ prog_data->param[this->uniforms * 4 + j] = &clip_planes[i][j];
}
++this->uniforms;
}
int swiz = GET_SWZ(slots[i].swizzle, j);
last_swiz = swiz;
- prog_data->base.param[this->uniforms * 4 + j] = &values[swiz];
+ prog_data->param[this->uniforms * 4 + j] = &values[swiz];
if (swiz <= last_swiz)
this->uniform_vector_size[this->uniforms]++;
}
dst_reg es3_normalize_factor;
for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
- if (prog_data->inputs_read & BITFIELD64_BIT(i)) {
- uint8_t wa_flags = c->key.gl_attrib_wa_flags[i];
+ if (vs_prog_data->inputs_read & BITFIELD64_BIT(i)) {
+ uint8_t wa_flags = vs_compile->key.gl_attrib_wa_flags[i];
dst_reg reg(ATTR, i);
dst_reg reg_d = reg;
reg_d.type = BRW_REGISTER_TYPE_D;
* it VERT_ATTRIB_MAX, which setup_attributes() picks up on.
*/
dst_reg *reg = new(mem_ctx) dst_reg(ATTR, VERT_ATTRIB_MAX);
- prog_data->uses_vertexid = true;
+ vs_prog_data->uses_vertexid = true;
switch (ir->location) {
case SYSTEM_VALUE_VERTEX_ID:
return true;
}
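+/* Try to combine an add of a multiply into a single MAD: for "a * b + c"
+ * this emits one MAD instead of a MUL followed by an ADD.  mul_arg says
+ * which operand of the add is the multiply.
+ */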
+bool
+vec4_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
+{
+ /* 3-src instructions were introduced in gen6. */
+ if (intel->gen < 6)
+ return false;
+
+ /* MAD can only handle floating-point data. */
+ if (ir->type->base_type != GLSL_TYPE_FLOAT)
+ return false;
+
+ ir_rvalue *nonmul = ir->operands[1 - mul_arg];
+ ir_expression *mul = ir->operands[mul_arg]->as_expression();
+
+ if (!mul || mul->operation != ir_binop_mul)
+ return false;
+
+ nonmul->accept(this);
+ src_reg src0 = fix_3src_operand(this->result);
+
+ mul->operands[0]->accept(this);
+ src_reg src1 = fix_3src_operand(this->result);
+
+ mul->operands[1]->accept(this);
+ src_reg src2 = fix_3src_operand(this->result);
+
+ this->result = src_reg(this, ir->type);
+ emit(BRW_OPCODE_MAD, dst_reg(this->result), src0, src1, src2);
+
+ return true;
+}
+
void
vec4_visitor::emit_bool_comparison(unsigned int op,
dst_reg dst, src_reg src0, src_reg src1)
}
}
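+/* Check whether the rvalue is an int/uint constant whose value fits in the
+ * low 16 bits (unsigned).  Negative int constants have u[0] >= 1 << 16 and
+ * so fail the test, falling back to the full MUL/MACH sequence.
+ */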
+static bool
+is_16bit_constant(ir_rvalue *rvalue)
+{
+ ir_constant *constant = rvalue->as_constant();
+ if (!constant)
+ return false;
+
+ if (constant->type != glsl_type::int_type &&
+ constant->type != glsl_type::uint_type)
+ return false;
+
+ return constant->value.u[0] < (1 << 16);
+}
+
void
vec4_visitor::visit(ir_expression *ir)
{
if (try_emit_sat(ir))
return;
+ if (ir->operation == ir_binop_add) {
+ if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
+ return;
+ }
+
for (operand = 0; operand < ir->get_num_operands(); operand++) {
this->result.file = BAD_FILE;
ir->operands[operand]->accept(this);
assert(!"derivatives not valid in vertex shader");
break;
+ case ir_unop_bitfield_reverse:
+ emit(BFREV(result_dst, op[0]));
+ break;
+ case ir_unop_bit_count:
+ emit(CBIT(result_dst, op[0]));
+ break;
+ case ir_unop_find_msb: {
+ src_reg temp = src_reg(this, glsl_type::uint_type);
+
+ inst = emit(FBH(dst_reg(temp), op[0]));
+ inst->dst.writemask = WRITEMASK_XYZW;
+
+ /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
+ * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
+ * subtract the result from 31 to convert the MSB count into an LSB count.
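+       * For example, for an input of 0x10, FBH returns 27 (counting from the
+       * MSB) and 31 - 27 = 4, which is what findMSB() expects.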
+ */
+
+ /* FBH only supports UD type for dst, so use a MOV to convert UD to D. */
+ temp.swizzle = BRW_SWIZZLE_NOOP;
+ emit(MOV(result_dst, temp));
+
+ src_reg src_tmp = src_reg(result_dst);
+ emit(CMP(dst_null_d(), src_tmp, src_reg(-1), BRW_CONDITIONAL_NZ));
+
+ src_tmp.negate = true;
+ inst = emit(ADD(result_dst, src_tmp, src_reg(31)));
+ inst->predicate = BRW_PREDICATE_NORMAL;
+ break;
+ }
+ case ir_unop_find_lsb:
+ emit(FBL(result_dst, op[0]));
+ break;
+
case ir_unop_noise:
assert(!"not reached: should be handled by lower_noise");
break;
case ir_binop_mul:
if (ir->type->is_integer()) {
- /* For integer multiplication, the MUL uses the low 16 bits
- * of one of the operands (src0 on gen6, src1 on gen7). The
- * MACH accumulates in the contribution of the upper 16 bits
- * of that operand.
- *
- * FINISHME: Emit just the MUL if we know an operand is small
- * enough.
- */
- struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);
-
- emit(MUL(acc, op[0], op[1]));
- emit(MACH(dst_null_d(), op[0], op[1]));
- emit(MOV(result_dst, src_reg(acc)));
+      /* For integer multiplication, the MUL uses the low 16 bits of one of
+       * the operands (src0 through SNB, src1 on IVB and later), and the MACH
+       * then adds in the contribution of that operand's upper 16 bits.  If we
+       * can determine that one of the arguments fits in the low 16 bits,
+       * though, we can just emit a single MUL.
+       */
+ if (is_16bit_constant(ir->operands[0])) {
+ if (intel->gen < 7)
+ emit(MUL(result_dst, op[0], op[1]));
+ else
+ emit(MUL(result_dst, op[1], op[0]));
+ } else if (is_16bit_constant(ir->operands[1])) {
+ if (intel->gen < 7)
+ emit(MUL(result_dst, op[1], op[0]));
+ else
+ emit(MUL(result_dst, op[0], op[1]));
+ } else {
+ struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);
+
+ emit(MUL(acc, op[0], op[1]));
+ emit(MACH(dst_null_d(), op[0], op[1]));
+ emit(MOV(result_dst, src_reg(acc)));
+ }
} else {
emit(MUL(result_dst, op[0], op[1]));
}
inst = emit(SHR(result_dst, op[0], op[1]));
break;
+ case ir_binop_bfm:
+ emit(BFI1(result_dst, op[0], op[1]));
+ break;
+
case ir_binop_ubo_load: {
ir_constant *uniform_block = ir->operands[0]->as_constant();
ir_constant *const_offset_ir = ir->operands[1]->as_constant();
break;
}
+ case ir_binop_vector_extract:
+ assert(!"should have been lowered by vec_index_to_cond_assign");
+ break;
+
case ir_triop_lrp:
- assert(!"not reached: should be handled by lrp_to_arith");
+ op[0] = fix_3src_operand(op[0]);
+ op[1] = fix_3src_operand(op[1]);
+ op[2] = fix_3src_operand(op[2]);
+ /* Note that the instruction's argument order is reversed from GLSL
+ * and the IR.
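+       * (The hardware LRP computes src0 * src1 + (1 - src0) * src2, so
+       * passing (a, y, x) gives GLSL's mix(x, y, a).)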
+ */
+ emit(LRP(result_dst, op[2], op[1], op[0]));
+ break;
+
+ case ir_triop_bfi:
+ op[0] = fix_3src_operand(op[0]);
+ op[1] = fix_3src_operand(op[1]);
+ op[2] = fix_3src_operand(op[2]);
+ emit(BFI2(result_dst, op[0], op[1], op[2]));
+ break;
+
+ case ir_triop_bitfield_extract:
+ op[0] = fix_3src_operand(op[0]);
+ op[1] = fix_3src_operand(op[1]);
+ op[2] = fix_3src_operand(op[2]);
+ /* Note that the instruction's argument order is reversed from GLSL
+ * and the IR.
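+       * (GLSL's bitfieldExtract() takes (value, offset, bits); the BFE
+       * emitted here receives them as (bits, offset, value).)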
+ */
+ emit(BFE(result_dst, op[2], op[1], op[0]));
+ break;
+
+ case ir_triop_vector_insert:
+ assert(!"should have been lowered by lower_vector_insert");
+ break;
+
+ case ir_quadop_bitfield_insert:
+ assert(!"not reached: should be handled by "
+ "bitfield_insert_to_bfm_bfi\n");
break;
case ir_quadop_vector:
this->result.swizzle = swizzle_for_size(type->vector_elements);
}
+
+int
+vec4_visitor::compute_array_stride(ir_dereference_array *ir)
+{
+ /* Under normal circumstances array elements are stored consecutively, so
+ * the stride is equal to the size of the array element.
+ */
+ return type_size(ir->type);
+}
+
+
void
vec4_visitor::visit(ir_dereference_array *ir)
{
ir_constant *constant_index;
src_reg src;
- int element_size = type_size(ir->type);
+ int array_stride = compute_array_stride(ir);
constant_index = ir->array_index->constant_expression_value();
src = this->result;
if (constant_index) {
- src.reg_offset += constant_index->value.i[0] * element_size;
+ src.reg_offset += constant_index->value.i[0] * array_stride;
} else {
/* Variable index array dereference. It eats the "vec4" of the
* base of the array and an index that offsets the Mesa register
src_reg index_reg;
- if (element_size == 1) {
+ if (array_stride == 1) {
index_reg = this->result;
} else {
index_reg = src_reg(this, glsl_type::int_type);
- emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
+ emit(MUL(dst_reg(index_reg), this->result, src_reg(array_stride)));
}
if (src.reladdr) {
shadow_comparitor = this->result;
}
- const glsl_type *lod_type, *sample_index_type;
+ const glsl_type *lod_type = NULL, *sample_index_type = NULL;
src_reg lod, dPdx, dPdy, sample_index;
switch (ir->op) {
case ir_tex:
emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
src_reg(0)));
/* Load the shadow comparitor */
- if (ir->shadow_comparitor) {
+ if (ir->shadow_comparitor && ir->op != ir_txd) {
emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
WRITEMASK_X),
shadow_comparitor));
emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
inst->mlen++;
- if (ir->type->vector_elements == 3) {
+ if (ir->type->vector_elements == 3 || ir->shadow_comparitor) {
dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
inst->mlen++;
+
+ if (ir->shadow_comparitor) {
+ emit(MOV(dst_reg(MRF, param_base + 2,
+ ir->shadow_comparitor->type, WRITEMASK_Z),
+ shadow_comparitor));
+ }
}
} else /* intel->gen == 4 */ {
emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
void
vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
{
- int s = c->key.base.tex.swizzles[sampler];
+ int s = key->tex.swizzles[sampler];
this->result = src_reg(this, ir->type);
dst_reg swizzled_result(this->result);
vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
{
if (intel->gen < 6 &&
- ((prog_data->base.vue_map.slots_valid & VARYING_BIT_PSIZ) ||
- c->key.base.userclip_active || brw->has_negative_rhw_bug)) {
+ ((prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) ||
+ key->userclip_active || brw->has_negative_rhw_bug)) {
dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
dst_reg header1_w = header1;
header1_w.writemask = WRITEMASK_W;
emit(MOV(header1, 0u));
- if (prog_data->base.vue_map.slots_valid & VARYING_BIT_PSIZ) {
+ if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
src_reg psiz = src_reg(output_reg[VARYING_SLOT_PSIZ]);
current_annotation = "Point size";
}
current_annotation = "Clipping flags";
- for (i = 0; i < c->key.base.nr_userclip_plane_consts; i++) {
+ for (i = 0; i < key->nr_userclip_plane_consts; i++) {
vec4_instruction *inst;
+ gl_varying_slot slot = (prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX)
+ ? VARYING_SLOT_CLIP_VERTEX : VARYING_SLOT_POS;
- inst = emit(DP4(dst_null_f(), src_reg(output_reg[VARYING_SLOT_POS]),
+ inst = emit(DP4(dst_null_f(), src_reg(output_reg[slot]),
src_reg(this->userplane[i])));
inst->conditional_mod = BRW_CONDITIONAL_L;
emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
} else {
emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
- if (prog_data->base.vue_map.slots_valid & VARYING_BIT_PSIZ) {
+ if (prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
emit(MOV(brw_writemask(reg, WRITEMASK_W),
src_reg(output_reg[VARYING_SLOT_PSIZ])));
}
+ if (prog_data->vue_map.slots_valid & VARYING_BIT_LAYER) {
+ emit(MOV(retype(brw_writemask(reg, WRITEMASK_Y), BRW_REGISTER_TYPE_D),
+ src_reg(output_reg[VARYING_SLOT_LAYER])));
+ }
}
}
* if the user wrote to it; otherwise we use gl_Position.
*/
gl_varying_slot clip_vertex = VARYING_SLOT_CLIP_VERTEX;
- if (!(prog_data->base.vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX)) {
+ if (!(prog_data->vue_map.slots_valid & VARYING_BIT_CLIP_VERTEX)) {
clip_vertex = VARYING_SLOT_POS;
}
- for (int i = 0; i + offset < c->key.base.nr_userclip_plane_consts && i < 4;
+ for (int i = 0; i + offset < key->nr_userclip_plane_consts && i < 4;
++i) {
emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
src_reg(output_reg[clip_vertex]),
varying == VARYING_SLOT_COL1 ||
varying == VARYING_SLOT_BFC0 ||
varying == VARYING_SLOT_BFC1) &&
- c->key.base.clamp_vertex_color) {
+ key->clamp_vertex_color) {
inst->saturate = true;
}
}
current_annotation = "NDC";
emit(MOV(reg, src_reg(output_reg[BRW_VARYING_SLOT_NDC])));
break;
- case BRW_VARYING_SLOT_POS_DUPLICATE:
case VARYING_SLOT_POS:
current_annotation = "gl_Position";
emit(MOV(reg, src_reg(output_reg[VARYING_SLOT_POS])));
break;
case VARYING_SLOT_CLIP_DIST0:
case VARYING_SLOT_CLIP_DIST1:
- if (this->c->key.base.uses_clip_distance) {
+ if (this->key->uses_clip_distance) {
emit_generic_urb_slot(reg, varying);
} else {
current_annotation = "user clip distances";
return mlen;
}
+void
+vec4_vs_visitor::emit_urb_write_header(int mrf)
+{
+ /* No need to do anything for VS; an implied write to this MRF will be
+ * performed by VS_OPCODE_URB_WRITE.
+ */
+ (void) mrf;
+}
+
+vec4_instruction *
+vec4_vs_visitor::emit_urb_write_opcode(bool complete)
+{
+ /* For VS, the URB writes end the thread. */
+ if (complete) {
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ emit_shader_time_end();
+ }
+
+ vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
+ inst->eot = complete;
+
+ return inst;
+}
+
/**
- * Generates the VUE payload plus the 1 or 2 URB write instructions to
- * complete the VS thread.
+ * Generates the VUE payload plus the necessary URB write instructions to
+ * output it.
*
* The VUE layout is documented in Volume 2a.
*/
void
-vec4_vs_visitor::emit_thread_end()
+vec4_visitor::emit_vertex()
{
/* MRF 0 is reserved for the debugger, so start with message header
* in MRF 1.
*/
assert ((max_usable_mrf - base_mrf) % 2 == 0);
- /* First mrf is the g0-based message header containing URB handles and such,
- * which is implied in VS_OPCODE_URB_WRITE.
+ /* First mrf is the g0-based message header containing URB handles and
+ * such.
*/
- mrf++;
+ emit_urb_write_header(mrf++);
if (intel->gen < 6) {
emit_ndc_computation();
/* Set up the VUE data for the first URB write */
int slot;
- for (slot = 0; slot < prog_data->base.vue_map.num_slots; ++slot) {
- emit_urb_slot(mrf++, prog_data->base.vue_map.slot_to_varying[slot]);
+ for (slot = 0; slot < prog_data->vue_map.num_slots; ++slot) {
+ emit_urb_slot(mrf++, prog_data->vue_map.slot_to_varying[slot]);
/* If this was max_usable_mrf, we can't fit anything more into this URB
* WRITE.
}
}
- bool eot = slot >= prog_data->base.vue_map.num_slots;
- if (eot) {
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
- emit_shader_time_end();
- }
+ bool complete = slot >= prog_data->vue_map.num_slots;
current_annotation = "URB write";
- vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
+ vec4_instruction *inst = emit_urb_write_opcode(complete);
inst->base_mrf = base_mrf;
inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
- inst->eot = eot;
/* Optional second URB write */
- if (!inst->eot) {
+ if (!complete) {
mrf = base_mrf + 1;
- for (; slot < prog_data->base.vue_map.num_slots; ++slot) {
+ for (; slot < prog_data->vue_map.num_slots; ++slot) {
assert(mrf < max_usable_mrf);
- emit_urb_slot(mrf++, prog_data->base.vue_map.slot_to_varying[slot]);
+ emit_urb_slot(mrf++, prog_data->vue_map.slot_to_varying[slot]);
}
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
- emit_shader_time_end();
-
current_annotation = "URB write";
- inst = emit(VS_OPCODE_URB_WRITE);
+ inst = emit_urb_write_opcode(true /* complete */);
inst->base_mrf = base_mrf;
inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
- inst->eot = true;
/* URB destination offset. In the previous write, we got MRFs
* 2-13 minus the one header MRF, so 12 regs. URB offset is in
* URB row increments, and each of our MRFs is half of one of
}
}
+void
+vec4_vs_visitor::emit_thread_end()
+{
+ /* For VS, we always end the thread by emitting a single vertex.
+ * emit_urb_write_opcode() will take care of setting the eot flag on the
+ * SEND instruction.
+ */
+ emit_vertex();
+}
+
src_reg
vec4_visitor::get_scratch_offset(vec4_instruction *inst,
src_reg *reladdr, int reg_offset)
if (inst->dst.file == GRF && inst->dst.reladdr &&
scratch_loc[inst->dst.reg] == -1) {
- scratch_loc[inst->dst.reg] = c->base.last_scratch;
- c->base.last_scratch += this->virtual_grf_sizes[inst->dst.reg];
+ scratch_loc[inst->dst.reg] = c->last_scratch;
+ c->last_scratch += this->virtual_grf_sizes[inst->dst.reg];
}
for (int i = 0 ; i < 3; i++) {
if (src->file == GRF && src->reladdr &&
scratch_loc[src->reg] == -1) {
- scratch_loc[src->reg] = c->base.last_scratch;
- c->base.last_scratch += this->virtual_grf_sizes[src->reg];
+ scratch_loc[src->reg] = c->last_scratch;
+ c->last_scratch += this->virtual_grf_sizes[src->reg];
}
}
}
* add it.
*/
if (pull_constant_loc[uniform] == -1) {
- const float **values = &prog_data->base.param[uniform * 4];
+ const float **values = &prog_data->param[uniform * 4];
- pull_constant_loc[uniform] = prog_data->base.nr_pull_params / 4;
+ pull_constant_loc[uniform] = prog_data->nr_pull_params / 4;
for (int j = 0; j < uniform_size[uniform] * 4; j++) {
- prog_data->base.pull_param[prog_data->base.nr_pull_params++]
+ prog_data->pull_param[prog_data->nr_pull_params++]
= values[j];
}
}
}
vec4_visitor::vec4_visitor(struct brw_context *brw,
- struct brw_vs_compile *c,
- struct brw_vs_prog_data *prog_data,
+ struct brw_vec4_compile *c,
+ struct gl_program *prog,
+ const struct brw_vec4_prog_key *key,
+ struct brw_vec4_prog_data *prog_data,
struct gl_shader_program *shader_prog,
struct brw_shader *shader,
- void *mem_ctx)
+ void *mem_ctx,
+ bool debug_flag)
+ : debug_flag(debug_flag)
{
- this->c = c;
this->brw = brw;
this->intel = &brw->intel;
this->ctx = &intel->ctx;
memset(this->output_reg_annotation, 0, sizeof(this->output_reg_annotation));
this->c = c;
- this->prog = &c->vp->program.Base;
+ this->prog = prog;
+ this->key = key;
this->prog_data = prog_data;
this->variable_ht = hash_table_ctor(0,
hash_table_pointer_hash,
hash_table_pointer_compare);
- this->virtual_grf_def = NULL;
- this->virtual_grf_use = NULL;
+ this->virtual_grf_start = NULL;
+ this->virtual_grf_end = NULL;
this->virtual_grf_sizes = NULL;
this->virtual_grf_count = 0;
this->virtual_grf_reg_map = NULL;
vec4_vs_visitor::vec4_vs_visitor(struct brw_context *brw,
- struct brw_vs_compile *c,
- struct brw_vs_prog_data *prog_data,
+ struct brw_vs_compile *vs_compile,
+ struct brw_vs_prog_data *vs_prog_data,
struct gl_shader_program *prog,
struct brw_shader *shader,
void *mem_ctx)
- : vec4_visitor(brw, c, prog_data, prog, shader, mem_ctx)
+ : vec4_visitor(brw, &vs_compile->base, &vs_compile->vp->program.Base,
+ &vs_compile->key.base, &vs_prog_data->base, prog, shader,
+ mem_ctx, INTEL_DEBUG & DEBUG_VS),
+ vs_compile(vs_compile),
+ vs_prog_data(vs_prog_data)
{
}
this->fail_msg = msg;
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (debug_flag) {
fprintf(stderr, "%s", msg);
}
}