/* Return whether the given SrcReg arg can be used as an immediate float
 * operand instead of a PROGRAM_CONSTANT value loaded through push/pull.
*/
-static GLboolean
+static bool
brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
{
int opcode_array[] = {
   /* These opcodes get broken down in a way that allows two
    * args to be immediates.
    */
if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
if (arg == 1 || arg == 2)
- return GL_TRUE;
+ return true;
}
   if (opcode >= ARRAY_SIZE(opcode_array))
- return GL_FALSE;
+ return false;
return arg == opcode_array[opcode] - 1;
}
c->last_tmp = c->first_tmp;
}
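+/* Find the lowest-numbered PROGRAM_OUTPUT register that is written with
+ * relative addressing, or VERT_RESULT_MAX if there is none.
+ */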
+static int
+get_first_reladdr_output(struct gl_vertex_program *vp)
+{
+ int i;
+ int first_reladdr_output = VERT_RESULT_MAX;
+
+ for (i = 0; i < vp->Base.NumInstructions; i++) {
+ struct prog_instruction *inst = vp->Base.Instructions + i;
+
+ if (inst->DstReg.File == PROGRAM_OUTPUT &&
+ inst->DstReg.RelAddr &&
+ inst->DstReg.Index < first_reladdr_output)
+ first_reladdr_output = inst->DstReg.Index;
+ }
+
+ return first_reladdr_output;
+}
+
+/* Clears the record of which vp_const_buffer elements have been
+ * loaded into our constant buffer registers, for the starts of new
+ * blocks after control flow.
+ */
+static void
+clear_current_const(struct brw_vs_compile *c)
+{
+ unsigned int i;
+
+ if (c->vp->use_const_buffer) {
+ for (i = 0; i < 3; i++) {
+ c->current_const[i].index = -1;
+ }
+ }
+}
+
+/* The message length for all SEND messages is restricted to [1,15]. This
+ * includes 1 for the header, so anything in slots 14 and above needs to be
+ * placed in a general-purpose register and emitted using a second URB write.
+ */
+#define MAX_SLOTS_IN_FIRST_URB_WRITE 14
+
+/**
+ * Determine whether the given vertex output can be written directly to a MRF
+ * or whether it has to be stored in a general-purpose register.
+ */
+static inline bool can_use_direct_mrf(int vert_result,
+ int first_reladdr_output, int slot)
+{
+ if (vert_result == VERT_RESULT_HPOS || vert_result == VERT_RESULT_PSIZ) {
+ /* These never go straight into MRF's. They are placed in the MRF by
+ * epilog code.
+ */
+ return false;
+ }
+ if (first_reladdr_output <= vert_result && vert_result < VERT_RESULT_MAX) {
+ /* Relative addressing might be used to access this vert_result, so it
+ * needs to go into a general-purpose register.
+ */
+ return false;
+ }
+ if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE) {
+ /* This output won't go out until the second URB write so it must be
+ * stored in a general-purpose register until then.
+ */
+ return false;
+ }
+ return true;
+}
/**
* Preallocate GRF register before code emit.
*/
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
+ struct brw_context *brw = c->func.brw;
struct intel_context *intel = &c->func.brw->intel;
- GLuint i, reg = 0, mrf;
+ GLuint i, reg = 0, slot;
int attributes_in_vue;
+ int first_reladdr_output;
+ int max_constant;
+ int constant = 0;
+ struct brw_vertex_program *vp = c->vp;
+ const struct gl_program_parameter_list *params = vp->program.Base.Parameters;
/* Determine whether to use a real constant buffer or use a block
 * of GRF registers for constants.  The latter is faster but only
 * works if everything fits in the GRF.
 */
if (c->vp->program.Base.Parameters->NumParameters +
c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
- c->vp->use_const_buffer = GL_TRUE;
+ c->vp->use_const_buffer = true;
else
- c->vp->use_const_buffer = GL_FALSE;
+ c->vp->use_const_buffer = false;
/*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/
/* User clip planes from curbe:
*/
- if (c->key.nr_userclip) {
- for (i = 0; i < c->key.nr_userclip; i++) {
- c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
- }
+ if (c->key.userclip_active) {
+ if (intel->gen >= 6) {
+ for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
+ c->userplane[i] = stride(brw_vec4_grf(reg + i / 2,
+ (i % 2) * 4), 0, 4, 1);
+ }
+ reg += ALIGN(c->key.nr_userclip_plane_consts, 2) / 2;
+ } else {
+ for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
+ c->userplane[i] = stride(brw_vec4_grf(reg + (6 + i) / 2,
+ (i % 2) * 4), 0, 4, 1);
+ }
+ reg += (ALIGN(6 + c->key.nr_userclip_plane_consts, 4) / 4) * 2;
+ }
- /* Deal with curbe alignment:
- */
- reg += ((6 + c->key.nr_userclip + 3) / 4) * 2;
}
- /* Vertex program parameters from curbe:
+ /* Assign some (probably all) of the vertex program constants to
+ * the push constant buffer/CURBE.
+ *
+ * There's an obvious limit to the number of push constants equal to
+ * the number of registers available, and that number is smaller
+ * than the minimum maximum number of vertex program parameters, so
+ * support for pull constants is required if we overflow.
+ * Additionally, on gen6 the number of push constants is even
+ * lower.
+ *
+ * When there's relative addressing, we don't know what range of
+ * Mesa IR registers can be accessed. And generally, when relative
+ * addressing is used we also have too many constants to load them
+ * all as push constants. So, we'll just support relative
+ * addressing out of the pull constant buffers, and try to load as
+ * many statically-accessed constants into the push constant buffer
+ * as we can.
*/
- if (c->vp->use_const_buffer) {
- int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
- int constant = 0;
-
- /* We've got more constants than we can load with the push
- * mechanism. This is often correlated with reladdr loads where
- * we should probably be using a pull mechanism anyway to avoid
- * excessive reading. However, the pull mechanism is slow in
- * general. So, we try to allocate as many non-reladdr-loaded
- * constants through the push buffer as we can before giving up.
- */
- memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
- for (i = 0;
- i < c->vp->program.Base.NumInstructions && constant < max_constant;
- i++) {
- struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
- int arg;
-
- for (arg = 0; arg < 3 && constant < max_constant; arg++) {
- if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
- inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
- inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
- inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
- inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
- inst->SrcReg[arg].RelAddr)
- continue;
-
- if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
- c->constant_map[inst->SrcReg[arg].Index] = constant++;
- }
+ if (intel->gen >= 6) {
+ /* We can only load 32 regs of push constants. */
+ max_constant = 32 * 2 - c->key.nr_userclip_plane_consts;
+ } else {
+ max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
+ }
+
+ /* constant_map maps from ParameterValues[] index to index in the
+ * push constant buffer, or -1 if it's only in the pull constant
+ * buffer.
+ */
+ memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
+ for (i = 0;
+ i < c->vp->program.Base.NumInstructions && constant < max_constant;
+ i++) {
+ struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
+ int arg;
+
+ for (arg = 0; arg < 3 && constant < max_constant; arg++) {
+ if (inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
+ inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
+ inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
+ inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
+ inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) {
+ continue;
+ }
+
+ if (inst->SrcReg[arg].RelAddr) {
+ c->vp->use_const_buffer = true;
+ continue;
}
- }
- for (i = 0; i < constant; i++) {
- c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
- (i%2) * 4),
- 0, 4, 1);
+ if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
+ c->constant_map[inst->SrcReg[arg].Index] = constant++;
+ }
}
- reg += (constant + 1) / 2;
- c->prog_data.curb_read_length = reg - 1;
- /* XXX 0 causes a bug elsewhere... */
- c->prog_data.nr_params = MAX2(constant * 4, 4);
}
- else {
- /* use a section of the GRF for constants */
- GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
- for (i = 0; i < nr_params; i++) {
- c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
+
+ /* If we ran out of push constant space, then we'll also upload all
+ * constants through the pull constant buffer so that they can be
+ * accessed no matter what. For relative addressing (the common
+ * case) we need them all in place anyway.
+ */
+ if (constant == max_constant)
+ c->vp->use_const_buffer = true;
+
+ /* Set up the references to the pull parameters if present. This backend
+ * uses a 1:1 mapping from Mesa IR's index to location in the pull constant
+ * buffer, while the new VS backend allocates values to the pull buffer on
+ * demand.
+ */
+ if (c->vp->use_const_buffer) {
+ for (i = 0; i < params->NumParameters * 4; i++) {
+ c->prog_data.pull_param[i] = &params->ParameterValues[i / 4][i % 4].f;
}
- reg += (nr_params + 1) / 2;
- c->prog_data.curb_read_length = reg - 1;
+ c->prog_data.nr_pull_params = i;
+ }
- c->prog_data.nr_params = nr_params * 4;
+ for (i = 0; i < constant; i++) {
+ c->regs[PROGRAM_STATE_VAR][i] = stride(brw_vec4_grf(reg + i / 2,
+ (i % 2) * 4),
+ 0, 4, 1);
}
+ reg += (constant + 1) / 2;
+ c->prog_data.curb_read_length = reg - 1;
+ c->prog_data.nr_params = constant * 4;
+ /* XXX 0 causes a bug elsewhere... */
+ if (intel->gen < 6 && c->prog_data.nr_params == 0)
+ c->prog_data.nr_params = 4;
/* Allocate input regs:
*/
c->nr_inputs = 0;
for (i = 0; i < VERT_ATTRIB_MAX; i++) {
- if (c->prog_data.inputs_read & (1 << i)) {
+ if (c->prog_data.inputs_read & BITFIELD64_BIT(i)) {
c->nr_inputs++;
c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
reg++;
/* Allocate outputs. The non-position outputs go straight into message regs.
*/
- c->nr_outputs = 0;
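+ /* The VUE map assigns a slot to every written vert_result; outputs are
+ * then given their MRF or GRF in slot order below.
+ */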
+ brw_compute_vue_map(&c->vue_map, intel, &c->prog_data);
c->first_output = reg;
- c->first_overflow_output = 0;
- if (intel->gen >= 6)
- mrf = 4;
- else if (intel->gen == 5)
- mrf = 8;
- else
- mrf = 4;
-
- for (i = 0; i < VERT_RESULT_MAX; i++) {
- if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
- c->nr_outputs++;
- assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
- if (i == VERT_RESULT_HPOS) {
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- }
- else if (i == VERT_RESULT_PSIZ) {
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- mrf++; /* just a placeholder? XXX fix later stages & remove this */
- }
- else {
- /* Two restrictions on our compute-to-MRF here. The
- * message length for all SEND messages is restricted to
- * [1,15], so we can't use mrf 15, as that means a length
- * of 16.
- *
- * Additionally, URB writes are aligned to URB rows, so we
- * need to put an even number of registers of URB data in
- * each URB write so that the later write is aligned. A
- * message length of 15 means 1 message header reg plus 14
- * regs of URB data.
- *
- * For attributes beyond the compute-to-MRF, we compute to
- * GRFs and they will be written in the second URB_WRITE.
- */
- if (mrf < 15) {
- c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
- mrf++;
- }
- else {
- if (!c->first_overflow_output)
- c->first_overflow_output = i;
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- }
- }
+ first_reladdr_output = get_first_reladdr_output(&c->vp->program);
+
+ for (slot = 0; slot < c->vue_map.num_slots; slot++) {
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ assert(vert_result < Elements(c->regs[PROGRAM_OUTPUT]));
+ if (can_use_direct_mrf(vert_result, first_reladdr_output, slot)) {
+ c->regs[PROGRAM_OUTPUT][vert_result] = brw_message_reg(slot + 1);
+ } else {
+ c->regs[PROGRAM_OUTPUT][vert_result] = brw_vec8_grf(reg, 0);
+ reg++;
}
}
if (c->vp->use_const_buffer) {
for (i = 0; i < 3; i++) {
- c->current_const[i].index = -1;
c->current_const[i].reg = brw_vec8_grf(reg, 0);
reg++;
}
+ clear_current_const(c);
}
for (i = 0; i < 128; i++) {
/* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
* them to fit the biggest thing they need to.
*/
- attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);
+ attributes_in_vue = MAX2(c->vue_map.num_slots, c->nr_inputs);
- /* See emit_vertex_write() for where the VUE's overhead on top of the
- * attributes comes from.
- */
- if (intel->gen >= 6)
- c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 7) / 8;
- else if (intel->gen == 5)
- c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
- else
- c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;
+ if (intel->gen == 6) {
+ /* Each attribute is 32 bytes (2 vec4s), so dividing by 8 gives us the
+ * number of 128-byte (1024-bit) units.
+ */
+ c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 8) / 8;
+ } else {
+ /* Each attribute is 16 bytes (1 vec4), so dividing by 4 gives us the
+ * number of 64-byte (512-bit) units.
+ */
+ c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 4) / 4;
+ }
c->prog_data.total_grf = reg;
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
printf("%s reg = %d\n", __FUNCTION__, reg);
struct brw_reg arg0,
struct brw_reg arg1 )
{
- brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
- brw_SEL(p, dst, arg0, arg1);
- brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ struct intel_context *intel = &p->brw->intel;
+
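+ /* On gen6, SEL with a conditional modifier does the compare and the
+ * select in a single instruction, so no CMP/predication is needed.
+ */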
+ if (intel->gen >= 6) {
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
+ brw_SEL(p, dst, arg0, arg1);
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ } else {
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
+ brw_SEL(p, dst, arg0, arg1);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
}
static void emit_min( struct brw_compile *p,
struct brw_reg arg0,
struct brw_reg arg1 )
{
- brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
- brw_SEL(p, dst, arg0, arg1);
- brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ struct intel_context *intel = &p->brw->intel;
+
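+ /* Same embedded-compare SEL as in emit_max, using L instead of GE. */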
+ if (intel->gen >= 6) {
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
+ brw_SEL(p, dst, arg0, arg1);
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ } else {
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
+ brw_SEL(p, dst, arg0, arg1);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
}
+static void emit_arl(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src)
+{
+ struct intel_context *intel = &p->brw->intel;
-static void emit_math1( struct brw_vs_compile *c,
- GLuint function,
- struct brw_reg dst,
- struct brw_reg arg0,
- GLuint precision)
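+ /* On gen6, round in float first, then convert the result to the
+ * destination's type with a separate MOV.
+ */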
+ if (intel->gen >= 6) {
+ struct brw_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
+
+ brw_RNDD(p, dst_f, src);
+ brw_MOV(p, dst, dst_f);
+ } else {
+ brw_RNDD(p, dst, src);
+ }
+}
+
+static void emit_math1_gen4(struct brw_vs_compile *c,
+ GLuint function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ GLuint precision)
{
/* There are various odd behaviours with SEND on the simulator. In
* addition there are documented issues with the fact that the GEN4
* whether that turns out to be a simulator bug or not:
*/
struct brw_compile *p = &c->func;
- struct intel_context *intel = &p->brw->intel;
struct brw_reg tmp = dst;
- GLboolean need_tmp = (intel->gen < 6 &&
- (dst.dw1.bits.writemask != 0xf ||
- dst.file != BRW_GENERAL_REGISTER_FILE));
+ bool need_tmp = false;
- if (need_tmp)
+ if (dst.file != BRW_GENERAL_REGISTER_FILE ||
+ dst.dw1.bits.writemask != 0xf)
+ need_tmp = true;
+
+ if (need_tmp)
tmp = get_tmp(c);
brw_math(p,
}
}
+static void
+emit_math1_gen6(struct brw_vs_compile *c,
+ GLuint function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ GLuint precision)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp_src, tmp_dst;
-static void emit_math2( struct brw_vs_compile *c,
+ /* Something is strange on gen6 math in 16-wide mode, though the
+ * docs say it's supposed to work. Punt to using align1 mode,
+ * which doesn't do writemasking and swizzles.
+ */
+ tmp_src = get_tmp(c);
+ tmp_dst = get_tmp(c);
+
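+ /* The align16 MOVs resolve arg0's swizzle on the way in and dst's
+ * writemask on the way out, since align1 math handles neither.
+ */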
+ brw_MOV(p, tmp_src, arg0);
+
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_math(p,
+ tmp_dst,
+ function,
+ BRW_MATH_SATURATE_NONE,
+ 2,
+ tmp_src,
+ BRW_MATH_DATA_SCALAR,
+ precision);
+ brw_set_access_mode(p, BRW_ALIGN_16);
+
+ brw_MOV(p, dst, tmp_dst);
+
+ release_tmp(c, tmp_src);
+ release_tmp(c, tmp_dst);
+}
+
+static void
+emit_math1(struct brw_vs_compile *c,
+ GLuint function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ GLuint precision)
+{
+ struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
+
+ if (intel->gen >= 6)
+ emit_math1_gen6(c, function, dst, arg0, precision);
+ else
+ emit_math1_gen4(c, function, dst, arg0, precision);
+}
+
+static void emit_math2_gen4( struct brw_vs_compile *c,
GLuint function,
struct brw_reg dst,
struct brw_reg arg0,
struct brw_reg arg1,
GLuint precision)
{
struct brw_compile *p = &c->func;
- struct intel_context *intel = &p->brw->intel;
struct brw_reg tmp = dst;
- GLboolean need_tmp = (intel->gen < 6 &&
- (dst.dw1.bits.writemask != 0xf ||
- dst.file != BRW_GENERAL_REGISTER_FILE));
+ bool need_tmp = false;
+
+ if (dst.file != BRW_GENERAL_REGISTER_FILE ||
+ dst.dw1.bits.writemask != 0xf)
+ need_tmp = true;
if (need_tmp)
tmp = get_tmp(c);
}
}
+static void emit_math2_gen6( struct brw_vs_compile *c,
+ GLuint function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1,
+ GLuint precision)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp_src0, tmp_src1, tmp_dst;
+
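+ /* Gen6 align16 math has the same issue as in emit_math1_gen6, so stage
+ * both operands and the result through align1 temporaries here too.
+ */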
+ tmp_src0 = get_tmp(c);
+ tmp_src1 = get_tmp(c);
+ tmp_dst = get_tmp(c);
+
+ brw_MOV(p, tmp_src0, arg0);
+ brw_MOV(p, tmp_src1, arg1);
+
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_math2(p,
+ tmp_dst,
+ function,
+ tmp_src0,
+ tmp_src1);
+ brw_set_access_mode(p, BRW_ALIGN_16);
+
+ brw_MOV(p, dst, tmp_dst);
+
+ release_tmp(c, tmp_src0);
+ release_tmp(c, tmp_src1);
+ release_tmp(c, tmp_dst);
+}
+
+static void emit_math2( struct brw_vs_compile *c,
+ GLuint function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1,
+ GLuint precision)
+{
+ struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
+
+ if (intel->gen >= 6)
+ emit_math2_gen6(c, function, dst, arg0, arg1, precision);
+ else
+ emit_math2_gen4(c, function, dst, arg0, arg1, precision);
+}
static void emit_exp_noalias( struct brw_vs_compile *c,
struct brw_reg dst,
struct brw_reg tmp = dst;
struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
- GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
+ bool need_tmp = (dst.dw1.bits.writemask != 0xf ||
dst.file != BRW_GENERAL_REGISTER_FILE);
if (need_tmp) {
struct brw_reg arg0 )
{
struct brw_compile *p = &c->func;
- struct brw_instruction *if_insn;
struct brw_reg tmp = dst;
- GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
+ bool need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
if (need_tmp)
tmp = get_tmp(c);
* BRW_EXECUTE_1 for all comparisons.
*/
brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
- if_insn = brw_IF(p, BRW_EXECUTE_8);
+ brw_IF(p, BRW_EXECUTE_8);
{
brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));
brw_swizzle1(arg0, 3),
BRW_MATH_PRECISION_PARTIAL);
}
-
- brw_ENDIF(p, if_insn);
+ brw_ENDIF(p);
release_tmp(c, tmp);
}
brw_MAC(p, dst, arg0, arg1);
}
-/** 3 or 4-component vector normalization */
-static void emit_nrm( struct brw_vs_compile *c,
- struct brw_reg dst,
- struct brw_reg arg0,
- int num_comps)
-{
- struct brw_compile *p = &c->func;
- struct brw_reg tmp = get_tmp(c);
-
- /* tmp = dot(arg0, arg0) */
- if (num_comps == 3)
- brw_DP3(p, tmp, arg0, arg0);
- else
- brw_DP4(p, tmp, arg0, arg0);
-
- /* tmp = 1 / sqrt(tmp) */
- emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
-
- /* dst = arg0 * tmp */
- brw_MUL(p, dst, arg0, tmp);
-
- release_tmp(c, tmp);
-}
-
-
static struct brw_reg
get_constant(struct brw_vs_compile *c,
const struct prog_instruction *inst,
}
/* replicate lower four floats into upper half (to get XYZWXYZW) */
- const_reg = stride(const_reg, 0, 4, 0);
+ const_reg = stride(const_reg, 0, 4, 1);
const_reg.subnr = 0;
return const_reg;
{
const struct prog_src_register *src = &inst->SrcReg[argIndex];
struct brw_compile *p = &c->func;
+ struct brw_context *brw = p->brw;
+ struct intel_context *intel = &brw->intel;
struct brw_reg const_reg = c->current_const[argIndex].reg;
- struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];
- struct brw_reg byte_addr_reg = get_tmp(c);
+ struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
+ uint32_t offset;
assert(argIndex < 3);
src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
- brw_MUL(p, byte_addr_reg, addrReg, brw_imm_ud(16));
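+ /* gen6 takes the vec4 index directly; older gens address the pull
+ * buffer in bytes, so scale the address register and offset by 16.
+ */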
+ if (intel->gen >= 6) {
+ offset = src->Index;
+ } else {
+ struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);
+ brw_MUL(p, byte_addr_reg, addr_reg, brw_imm_d(16));
+ addr_reg = byte_addr_reg;
+ offset = 16 * src->Index;
+ }
/* fetch the first vec4 */
brw_dp_READ_4_vs_relative(p,
- const_reg, /* writeback dest */
- byte_addr_reg, /* address register */
- 16 * src->Index, /* byte offset */
- SURF_INDEX_VERT_CONST_BUFFER /* binding table index */
- );
+ const_reg,
+ addr_reg,
+ offset,
+ SURF_INDEX_VERT_CONST_BUFFER);
return const_reg;
}
int reg_size = 32;
struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
- struct brw_reg temp_base = c->regs[inst->DstReg.File][0];
- GLuint byte_offset = temp_base.nr * 32 + temp_base.subnr;
+ struct brw_reg base = c->regs[inst->DstReg.File][inst->DstReg.Index];
+ GLuint byte_offset = base.nr * 32 + base.subnr;
struct brw_reg indirect = brw_vec4_indirect(0,0);
struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);
- byte_offset += inst->DstReg.Index * reg_size;
+ /* Because destination register indirect addressing can only use
+ * one index, we'll write each vertex's vec4 value separately.
+ */
+ val.width = BRW_WIDTH_4;
+ val.vstride = BRW_VERTICAL_STRIDE_4;
brw_push_insn_state(p);
brw_set_access_mode(p, BRW_ALIGN_1);
{
const GLuint file = inst->SrcReg[argIndex].File;
const GLint index = inst->SrcReg[argIndex].Index;
- const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;
+ const bool relAddr = inst->SrcReg[argIndex].RelAddr;
if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
const struct prog_src_register *src = &inst->SrcReg[argIndex];
if (component >= 0) {
params = c->vp->program.Base.Parameters;
- f = params->ParameterValues[src->Index][component];
+ f = params->ParameterValues[src->Index][component].f;
if (src->Abs)
f = fabs(f);
case PROGRAM_UNIFORM:
case PROGRAM_ENV_PARAM:
case PROGRAM_LOCAL_PARAM:
- if (c->vp->use_const_buffer) {
- if (!relAddr && c->constant_map[index] != -1) {
- assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
- return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
- } else if (relAddr)
+ if (!relAddr && c->constant_map[index] != -1) {
+ /* Take from the push constant buffer if possible. */
+ assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
+ return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
+ } else {
+ /* Must be in the pull constant buffer then. */
+ assert(c->vp->use_const_buffer);
+ if (relAddr)
return get_reladdr_constant(c, inst, argIndex);
else
return get_constant(c, inst, argIndex);
}
- else if (relAddr) {
- return deref(c, c->regs[PROGRAM_STATE_VAR][0], index, 16);
- }
- else {
- assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
- return c->regs[PROGRAM_STATE_VAR][index];
- }
case PROGRAM_ADDRESS:
assert(index == 0);
return c->regs[file][index];
GET_SWZ(src->Swizzle, 1),
GET_SWZ(src->Swizzle, 2),
GET_SWZ(src->Swizzle, 3));
- }
- /* Note this is ok for non-swizzle instructions:
- */
- reg.negate = src->Negate ? 1 : 0;
+ /* Note this is ok for non-swizzle ARB_vp instructions */
+ reg.negate = src->Negate ? 1 : 0;
+ }
return reg;
}
GLuint ones_mask = 0;
GLuint src_mask = 0;
GLubyte src_swz[4];
- GLboolean need_tmp = (src.Negate &&
+ bool need_tmp = (src.Negate &&
dst.file != BRW_GENERAL_REGISTER_FILE);
struct brw_reg tmp = dst;
GLuint i;
}
}
+static int
+align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
+{
+ struct intel_context *intel = &brw->intel;
+
+ if (intel->gen >= 6) {
+ /* URB data written (does not include the message header reg) must
+ * be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
+ * section 5.4.3.2.2: URB_INTERLEAVED.
+ *
+ * URB entries are allocated on a multiple of 1024 bits, so an
+ * extra 128 bits written here to make the end align to 256 is
+ * no problem.
+ */
+ if ((mlen % 2) != 1)
+ mlen++;
+ }
+
+ return mlen;
+}
/**
* Post-vertex-program processing. Send the results to the URB.
struct brw_reg ndc;
int eot;
GLuint len_vertex_header = 2;
+ int i;
+ int msg_len;
+ int slot;
if (c->key.copy_edgeflag) {
brw_MOV(p,
/* Update the header for point size, user clipping flags, and -ve rhw
* workaround.
*/
- if ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
- c->key.nr_userclip || brw->has_negative_rhw_bug)
- {
+ if (intel->gen >= 6) {
+ struct brw_reg m1 = brw_message_reg(1);
+
+ /* On gen6, m1 has each value in a separate dword, so we never
+ * need to mess with a temporary for computing the m1 value.
+ */
+ brw_MOV(p, retype(m1, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
+ if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
+ brw_MOV(p, brw_writemask(m1, WRITEMASK_W),
+ brw_swizzle1(c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ], 0));
+ }
+
+ /* Set the user clip distances in dwords 8-15 (m3-m4). */
+ if (c->key.userclip_active) {
+ for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
+ struct brw_reg m;
+ if (i < 4)
+ m = brw_message_reg(3);
+ else
+ m = brw_message_reg(4);
+
+ brw_DP4(p, brw_writemask(m, (1 << (i & 3))), pos, c->userplane[i]);
+ }
+ }
+ } else if ((c->prog_data.outputs_written &
+ BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
+ c->key.userclip_active || brw->has_negative_rhw_bug) {
struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
GLuint i;
if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
- brw_MUL(p, brw_writemask(header1, WRITEMASK_W), brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
- brw_AND(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(0x7ff<<8));
+ brw_MUL(p, brw_writemask(header1, WRITEMASK_W),
+ brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
+ brw_AND(p, brw_writemask(header1, WRITEMASK_W),
+ header1, brw_imm_ud(0x7ff<<8));
}
- for (i = 0; i < c->key.nr_userclip; i++) {
+ for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
* of zeros followed by two sets of NDC coordinates:
*/
brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_set_acc_write_control(p, 0);
/* The VUE layout is documented in Volume 2a. */
if (intel->gen >= 6) {
* dword 0-3 (m1) of the header is indices, point width, clip flags.
* dword 4-7 (m2) is the 4D space position
* dword 8-15 (m3,m4) of the vertex header is the user clip distance if
- * enabled. We don't use it, so skip it.
- * m3 is the first vertex element data we fill, which is the vertex
- * position.
+ * enabled.
+ * m3 or m5 is the first vertex element data we fill, which is
+ * the vertex position.
*/
brw_MOV(p, brw_message_reg(2), pos);
- brw_MOV(p, brw_message_reg(3), pos);
- len_vertex_header = 2;
+ len_vertex_header = 1;
+ if (c->key.userclip_active)
+ len_vertex_header += 2;
} else if (intel->gen == 5) {
/* There are 20 DWs (D0-D19) in VUE header on Ironlake:
* dword 0-3 (m1) of the header is indices, point width, clip flags.
len_vertex_header = 2;
}
- eot = (c->first_overflow_output == 0);
+ /* Move variable-addressed, non-overflow outputs to their MRFs. */
+ for (slot = len_vertex_header; slot < c->vue_map.num_slots; ++slot) {
+ if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE)
+ break;
+
+ int mrf = slot + 1;
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ if (c->regs[PROGRAM_OUTPUT][vert_result].file ==
+ BRW_GENERAL_REGISTER_FILE) {
+ brw_MOV(p, brw_message_reg(mrf),
+ c->regs[PROGRAM_OUTPUT][vert_result]);
+ }
+ }
+
+ eot = (slot >= c->vue_map.num_slots);
+
+ /* Message header, plus the (first part of the) VUE. */
+ msg_len = 1 + slot;
+ msg_len = align_interleaved_urb_mlen(brw, msg_len);
+ /* Any outputs beyond BRW_MAX_MRF should be in the second URB write */
+ assert(msg_len <= BRW_MAX_MRF - 1);
brw_urb_WRITE(p,
brw_null_reg(), /* dest */
c->r0, /* src */
0, /* allocate */
1, /* used */
- MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
+ msg_len,
0, /* response len */
eot, /* eot */
eot, /* writes complete */
0, /* urb destination offset */
BRW_URB_SWIZZLE_INTERLEAVE);
- if (c->first_overflow_output > 0) {
+ if (slot < c->vue_map.num_slots) {
/* Not all of the vertex outputs/results fit into the MRF.
* Move the overflowed attributes from the GRF to the MRF and
* issue another brw_urb_WRITE().
*/
- GLuint i, mrf = 1;
- for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
- if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
- /* move from GRF to MRF */
- brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
- mrf++;
- }
+ GLuint mrf = 1;
+ for (; slot < c->vue_map.num_slots; ++slot) {
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ /* move from GRF to MRF */
+ brw_MOV(p, brw_message_reg(mrf),
+ c->regs[PROGRAM_OUTPUT][vert_result]);
+ mrf++;
}
brw_urb_WRITE(p,
c->r0, /* src */
0, /* allocate */
1, /* used */
- mrf, /* msg len */
+ align_interleaved_urb_mlen(brw, mrf),
0, /* response len */
1, /* eot */
1, /* writes complete */
- 14 / 2, /* urb destination offset */
+ MAX_SLOTS_IN_FIRST_URB_WRITE / 2, /* urb destination offset */
BRW_URB_SWIZZLE_INTERLEAVE);
}
}
-static GLboolean
+static bool
accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
{
struct brw_compile *p = &c->func;
struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
if (p->nr_insn == 0)
- return GL_FALSE;
+ return false;
if (val.address_mode != BRW_ADDRESS_DIRECT)
- return GL_FALSE;
+ return false;
+
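+ /* A negate or abs modifier on the source would not be reflected in
+ * what was accumulated.
+ */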
+ if (val.negate || val.abs)
+ return false;
switch (prev_insn->header.opcode) {
case BRW_OPCODE_MOV:
prev_insn->bits1.da1.dest_reg_nr == val.nr &&
prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
prev_insn->bits1.da16.dest_writemask == 0xf)
- return GL_TRUE;
+ return true;
else
- return GL_FALSE;
+ return false;
default:
- return GL_FALSE;
+ return false;
}
}
}
}
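+/* GL_FIXED attributes arrive unscaled, so multiply the components used
+ * by each GL_FIXED input by 1/65536 to recover the intended values.
+ */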
+static void
+brw_vs_rescale_gl_fixed(struct brw_vs_compile *c)
+{
+ struct brw_compile *p = &c->func;
+ int i;
+
+ for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+ if (!(c->prog_data.inputs_read & BITFIELD64_BIT(i)))
+ continue;
+
+ if (c->key.gl_fixed_input_size[i] != 0) {
+ struct brw_reg reg = c->regs[PROGRAM_INPUT][i];
+
+ brw_MUL(p,
+ brw_writemask(reg, (1 << c->key.gl_fixed_input_size[i]) - 1),
+ reg, brw_imm_f(1.0 / 65536.0));
+ }
+ }
+}
+
/* Emit the vertex program instructions here.
*/
-void brw_vs_emit(struct brw_vs_compile *c )
+void brw_old_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
const GLuint nr_insns = c->vp->program.Base.NumInstructions;
- GLuint insn, if_depth = 0, loop_depth = 0;
- struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
- int if_depth_in_loop[MAX_LOOP_DEPTH];
+ GLuint insn;
const struct brw_indirect stack_index = brw_indirect(0, 0);
GLuint index;
GLuint file;
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
printf("vs-mesa:\n");
_mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
- GL_TRUE);
+ true);
printf("\n");
}
brw_set_compression_control(p, BRW_COMPRESSION_NONE);
brw_set_access_mode(p, BRW_ALIGN_16);
- if_depth_in_loop[loop_depth] = 0;
+
+ brw_set_acc_write_control(p, 1);
for (insn = 0; insn < nr_insns; insn++) {
GLuint i;
GLuint index = src->Index;
GLuint file = src->File;
if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
- c->output_regs[index].used_in_src = GL_TRUE;
+ c->output_regs[index].used_in_src = true;
}
switch (inst->Opcode) {
case OPCODE_CAL:
case OPCODE_RET:
- c->needs_stack = GL_TRUE;
+ c->needs_stack = true;
break;
default:
break;
*/
brw_vs_alloc_regs(c);
+ brw_vs_rescale_gl_fixed(c);
+
if (c->needs_stack)
brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));
const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
struct brw_reg args[3], dst;
GLuint i;
- struct brw_instruction *temp;
#if 0
printf("%d: ", insn);
const struct prog_src_register *src = &inst->SrcReg[i];
index = src->Index;
file = src->File;
- if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
- args[i] = c->output_regs[index].reg;
- else
+ if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src) {
+ /* Can't just make get_arg "do the right thing" here because
+ * other callers of get_arg and get_src_reg don't expect any
+ * special behavior for the c->output_regs[index].used_in_src
+ * case.
+ */
+ args[i] = c->output_regs[index].reg;
+ args[i].dw1.bits.swizzle =
+ BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
+ GET_SWZ(src->Swizzle, 1),
+ GET_SWZ(src->Swizzle, 2),
+ GET_SWZ(src->Swizzle, 3));
+
+ /* Note this is ok for non-swizzle ARB_vp instructions */
+ args[i].negate = src->Negate ? 1 : 0;
+ } else
args[i] = get_arg(c, inst, i);
}
index = inst->DstReg.Index;
file = inst->DstReg.File;
if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
- dst = c->output_regs[index].reg;
+ /* Can't just make get_dst "do the right thing" here because other
+ * callers of get_dst don't expect any special behavior for the
+ * c->output_regs[index].used_in_src case.
+ */
+ dst = brw_writemask(c->output_regs[index].reg, inst->DstReg.WriteMask);
else
dst = get_dst(c, inst->DstReg);
switch (inst->Opcode) {
case OPCODE_ABS:
+ args[0].negate = false;
brw_MOV(p, dst, brw_abs(args[0]));
break;
case OPCODE_ADD:
case OPCODE_DPH:
brw_DPH(p, dst, args[0], args[1]);
break;
- case OPCODE_NRM3:
- emit_nrm(c, dst, args[0], 3);
- break;
- case OPCODE_NRM4:
- emit_nrm(c, dst, args[0], 4);
- break;
case OPCODE_DST:
unalias2(c, dst, args[0], args[1], emit_dst_noalias);
break;
emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
break;
case OPCODE_ARL:
- brw_RNDD(p, dst, args[0]);
+ emit_arl(p, dst, args[0]);
break;
case OPCODE_FLR:
brw_RNDD(p, dst, args[0]);
emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
break;
case OPCODE_RSQ:
- emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
+ emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, brw_abs(args[0]), BRW_MATH_PRECISION_FULL);
break;
case OPCODE_SEQ:
case OPCODE_XPD:
emit_xpd(p, dst, args[0], args[1]);
break;
- case OPCODE_IF:
- assert(if_depth < MAX_IF_DEPTH);
- if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
+ case OPCODE_IF: {
+ struct brw_instruction *if_inst = brw_IF(p, BRW_EXECUTE_8);
/* Note that brw_IF smashes the predicate_control field. */
- if_inst[if_depth]->header.predicate_control = get_predicate(inst);
- if_depth_in_loop[loop_depth]++;
- if_depth++;
+ if_inst->header.predicate_control = get_predicate(inst);
break;
+ }
case OPCODE_ELSE:
- assert(if_depth > 0);
- if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
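+ /* Constants cached in GRFs can't be trusted across control-flow
+ * boundaries, so forget them (see clear_current_const()).
+ */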
+ clear_current_const(c);
+ brw_ELSE(p);
break;
case OPCODE_ENDIF:
- assert(if_depth > 0);
- brw_ENDIF(p, if_inst[--if_depth]);
- if_depth_in_loop[loop_depth]--;
+ clear_current_const(c);
+ brw_ENDIF(p);
break;
case OPCODE_BGNLOOP:
- loop_inst[loop_depth++] = brw_DO(p, BRW_EXECUTE_8);
- if_depth_in_loop[loop_depth] = 0;
+ clear_current_const(c);
+ brw_DO(p, BRW_EXECUTE_8);
break;
case OPCODE_BRK:
brw_set_predicate_control(p, get_predicate(inst));
- temp = brw_BREAK(p);
- temp->bits3.if_else.pop_count = if_depth_in_loop[loop_depth];
+ brw_BREAK(p);
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
break;
case OPCODE_CONT:
brw_set_predicate_control(p, get_predicate(inst));
- temp = brw_CONT(p);
- temp->bits3.if_else.pop_count = if_depth_in_loop[loop_depth];
+ if (intel->gen >= 6) {
+ gen6_CONT(p);
+ } else {
+ brw_CONT(p);
+ }
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
break;
- case OPCODE_ENDLOOP:
- {
- struct brw_instruction *inst0, *inst1;
- GLuint br = 1;
-
- loop_depth--;
-
- if (intel->gen == 5)
- br = 2;
-
- inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
- /* patch all the BREAK/CONT instructions from last BEGINLOOP */
- while (inst0 > loop_inst[loop_depth]) {
- inst0--;
- if (inst0->header.opcode == BRW_OPCODE_BREAK &&
- inst0->bits3.if_else.jump_count == 0) {
- inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
- }
- else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
- inst0->bits3.if_else.jump_count == 0) {
- inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
- }
- }
- }
+
+ case OPCODE_ENDLOOP:
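+ /* The BREAK/CONT jump patching that used to happen here is now done
+ * inside the shared brw_eu flow-control helpers.
+ */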
+ clear_current_const(c);
+ brw_WHILE(p);
break;
+
case OPCODE_BRA:
brw_set_predicate_control(p, get_predicate(inst));
brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
* instructions. Instead, we directly modify the header
* of the last (already stored) instruction.
*/
- if (inst->DstReg.File == PROGRAM_OUTPUT) {
+ if (inst->DstReg.File == PROGRAM_OUTPUT &&
+ c->key.clamp_vertex_color) {
if ((inst->DstReg.Index == VERT_RESULT_COL0)
|| (inst->DstReg.Index == VERT_RESULT_COL1)
|| (inst->DstReg.Index == VERT_RESULT_BFC0)
}
}
- if (inst->DstReg.RelAddr && inst->DstReg.File == PROGRAM_TEMPORARY) {
- /* We don't do RelAddr of PROGRAM_OUTPUT yet, because of the
- * compute-to-mrf and the fact that we are allocating
- * registers for only the used PROGRAM_OUTPUTs.
- */
+ if (inst->DstReg.RelAddr) {
+ assert(inst->DstReg.File == PROGRAM_TEMPORARY ||
+ inst->DstReg.File == PROGRAM_OUTPUT);
move_to_reladdr_dst(c, inst, dst);
}
}
brw_resolve_cals(p);
+ brw_set_uip_jip(p);
brw_optimize(p);
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
int i;
printf("vs-native:\n");