}
}
+/* The message length for all SEND messages is restricted to [1,15]. This
+ * includes 1 for the header, so anything in slots 14 and above needs to be
+ * placed in a general-purpose register and emitted using a second URB write.
+ */
+#define MAX_SLOTS_IN_FIRST_URB_WRITE 14
+
+/**
+ * Determine whether the given vertex output can be written directly to a MRF
+ * or whether it has to be stored in a general-purpose register.
+ */
+static inline bool can_use_direct_mrf(int vert_result,
+ int first_reladdr_output, int slot)
+{
+ if (vert_result == VERT_RESULT_HPOS || vert_result == VERT_RESULT_PSIZ) {
+ /* These never go straight into MRFs. They are placed in the MRF by
+ * epilog code.
+ */
+ return false;
+ }
+ if (first_reladdr_output <= vert_result && vert_result < VERT_RESULT_MAX) {
+ /* Relative addressing might be used to access this vert_result, so it
+ * needs to go into a general-purpose register.
+ */
+ return false;
+ }
+ if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE) {
+ /* This output won't go out until the second URB write so it must be
+ * stored in a general-purpose register until then.
+ */
+ return false;
+ }
+ return true;
+}
+
/**
* Preallocate GRF registers before code emit.
* Do things as simply as possible. Allocate and populate all regs
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
struct intel_context *intel = &c->func.brw->intel;
- GLuint i, reg = 0, mrf;
+ GLuint i, reg = 0, slot;
int attributes_in_vue;
int first_reladdr_output;
+ int max_constant;
+ int constant = 0;
+ struct brw_vertex_program *vp = c->vp;
+ const struct gl_program_parameter_list *params = vp->program.Base.Parameters;
/* Determine whether to use a real constant buffer or use a block
* of GRF registers for constants. The latter is faster but only
}
- /* Vertex program parameters from curbe:
+ /* Assign some (probably all) of the vertex program constants to
+ * the push constant buffer/CURBE.
+ *
+ * There's an obvious limit to the number of push constants, equal to
+ * the number of registers available, and that number is smaller
+ * than the minimum maximum number of vertex program parameters, so
+ * support for pull constants is required if we overflow.
+ * Additionally, on gen6 the number of push constants is even
+ * lower.
+ *
+ * When there's relative addressing, we don't know what range of
+ * Mesa IR registers can be accessed. And generally, when relative
+ * addressing is used we also have too many constants to load them
+ * all as push constants. So, we'll just support relative
+ * addressing out of the pull constant buffers, and try to load as
+ * many statically-accessed constants into the push constant buffer
+ * as we can.
*/
- if (c->vp->use_const_buffer) {
- int max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
- int constant = 0;
-
- /* We've got more constants than we can load with the push
- * mechanism. This is often correlated with reladdr loads where
- * we should probably be using a pull mechanism anyway to avoid
- * excessive reading. However, the pull mechanism is slow in
- * general. So, we try to allocate as many non-reladdr-loaded
- * constants through the push buffer as we can before giving up.
- */
- memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
- for (i = 0;
- i < c->vp->program.Base.NumInstructions && constant < max_constant;
- i++) {
- struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
- int arg;
-
- for (arg = 0; arg < 3 && constant < max_constant; arg++) {
- if ((inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
- inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
- inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
- inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
- inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) ||
- inst->SrcReg[arg].RelAddr)
- continue;
-
- if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
- c->constant_map[inst->SrcReg[arg].Index] = constant++;
- }
+ if (intel->gen >= 6) {
+ /* We can only load 32 regs of push constants, and each reg holds two
+ * vec4 constants; the userclip planes consume part of that space.
+ */
+ max_constant = 32 * 2 - c->key.nr_userclip;
+ } else {
+ max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
+ }
+
+ /* constant_map maps from ParameterValues[] index to index in the
+ * push constant buffer, or -1 if it's only in the pull constant
+ * buffer.
+ */
+ memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
+ for (i = 0;
+ i < c->vp->program.Base.NumInstructions && constant < max_constant;
+ i++) {
+ struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
+ int arg;
+
+ for (arg = 0; arg < 3 && constant < max_constant; arg++) {
+ if (inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
+ inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
+ inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
+ inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
+ inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) {
+ continue;
+ }
+
+ if (inst->SrcReg[arg].RelAddr) {
+ c->vp->use_const_buffer = GL_TRUE;
+ continue;
}
- }
- for (i = 0; i < constant; i++) {
- c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2,
- (i%2) * 4),
- 0, 4, 1);
+ if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
+ c->constant_map[inst->SrcReg[arg].Index] = constant++;
+ }
}
- reg += (constant + 1) / 2;
- c->prog_data.curb_read_length = reg - 1;
- /* XXX 0 causes a bug elsewhere... */
- c->prog_data.nr_params = MAX2(constant * 4, 4);
}
- else {
- /* use a section of the GRF for constants */
- GLuint nr_params = c->vp->program.Base.Parameters->NumParameters;
- for (i = 0; i < nr_params; i++) {
- c->regs[PROGRAM_STATE_VAR][i] = stride( brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
+
+ /* If we ran out of push constant space, then we'll also upload all
+ * constants through the pull constant buffer so that they can be
+ * accessed no matter what. For relative addressing (the common
+ * case) we need them all in place anyway.
+ */
+ if (constant == max_constant)
+ c->vp->use_const_buffer = GL_TRUE;
+
+ /* Set up the references to the pull parameters if present. This backend
+ * uses a 1:1 mapping from Mesa IR's index to location in the pull constant
+ * buffer, while the new VS backend allocates values to the pull buffer on
+ * demand.
+ */
+ if (c->vp->use_const_buffer) {
+ for (i = 0; i < params->NumParameters * 4; i++) {
+ c->prog_data.pull_param[i] = &params->ParameterValues[i / 4][i % 4].f;
}
- reg += (nr_params + 1) / 2;
- c->prog_data.curb_read_length = reg - 1;
+ c->prog_data.nr_pull_params = i;
+ }
- c->prog_data.nr_params = nr_params * 4;
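+ /* Push constants are packed two vec4s per CURBE register: constant i
+ * lands in the low or high half of GRF reg + i / 2.
+ */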
+ for (i = 0; i < constant; i++) {
+ c->regs[PROGRAM_STATE_VAR][i] = stride(brw_vec4_grf(reg + i / 2,
+ (i % 2) * 4),
+ 0, 4, 1);
}
+ reg += (constant + 1) / 2;
+ c->prog_data.curb_read_length = reg - 1;
+ c->prog_data.nr_params = constant * 4;
+ /* XXX 0 causes a bug elsewhere... */
+ if (intel->gen < 6 && c->prog_data.nr_params == 0)
+ c->prog_data.nr_params = 4;
/* Allocate input regs:
*/
/* Allocate outputs. The non-position outputs go straight into message regs.
*/
- c->nr_outputs = 0;
+ brw_compute_vue_map(&c->vue_map, intel, c->key.nr_userclip,
+ c->key.two_side_color, c->prog_data.outputs_written);
c->first_output = reg;
- c->first_overflow_output = 0;
-
- if (intel->gen >= 6) {
- mrf = 3;
- if (c->key.nr_userclip)
- mrf += 2;
- } else if (intel->gen == 5)
- mrf = 8;
- else
- mrf = 4;
first_reladdr_output = get_first_reladdr_output(&c->vp->program);
- for (i = 0; i < VERT_RESULT_MAX; i++) {
- if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
- c->nr_outputs++;
- assert(i < Elements(c->regs[PROGRAM_OUTPUT]));
- if (i == VERT_RESULT_HPOS) {
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- }
- else if (i == VERT_RESULT_PSIZ) {
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- }
- else {
- /* Two restrictions on our compute-to-MRF here. The
- * message length for all SEND messages is restricted to
- * [1,15], so we can't use mrf 15, as that means a length
- * of 16.
- *
- * Additionally, URB writes are aligned to URB rows, so we
- * need to put an even number of registers of URB data in
- * each URB write so that the later write is aligned. A
- * message length of 15 means 1 message header reg plus 14
- * regs of URB data.
- *
- * For attributes beyond the compute-to-MRF, we compute to
- * GRFs and they will be written in the second URB_WRITE.
- */
- if (first_reladdr_output > i && mrf < 15) {
- c->regs[PROGRAM_OUTPUT][i] = brw_message_reg(mrf);
- mrf++;
- }
- else {
- if (mrf >= 15 && !c->first_overflow_output)
- c->first_overflow_output = i;
- c->regs[PROGRAM_OUTPUT][i] = brw_vec8_grf(reg, 0);
- reg++;
- mrf++;
- }
- }
+
+ for (slot = 0; slot < c->vue_map.num_slots; slot++) {
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ assert(vert_result < Elements(c->regs[PROGRAM_OUTPUT]));
+ if (can_use_direct_mrf(vert_result, first_reladdr_output, slot)) {
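+ /* m0 carries the URB write header, so VUE slot n lands in MRF n + 1. */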
+ c->regs[PROGRAM_OUTPUT][vert_result] = brw_message_reg(slot + 1);
+ } else {
+ c->regs[PROGRAM_OUTPUT][vert_result] = brw_vec8_grf(reg, 0);
+ reg++;
}
}
/* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
* them to fit the biggest thing they need to.
*/
- attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);
-
- /* See emit_vertex_write() for where the VUE's overhead on top of the
- * attributes comes from.
- */
- if (intel->gen >= 6) {
- int header_regs = 2;
- if (c->key.nr_userclip)
- header_regs += 2;
+ attributes_in_vue = MAX2(c->vue_map.num_slots, c->nr_inputs);
- c->prog_data.urb_entry_size = (attributes_in_vue + header_regs + 7) / 8;
- } else if (intel->gen == 5)
- c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
- else
- c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;
+ if (intel->gen == 6) {
+ /* Each attribute is 16 bytes (1 vec4), so dividing by 8 gives us the
+ * number of 128-byte (1024-bit) units.
+ */
+ c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 8) / 8;
+ } else {
+ /* Each attribute is 16 bytes (1 vec4), so dividing by 4 gives us the
+ * number of 64-byte (512-bit) units.
+ */
+ c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 4) / 4;
+ }
c->prog_data.total_grf = reg;
}
}
+static void emit_arl(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src)
+{
+ struct intel_context *intel = &p->brw->intel;
+
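+ /* ARL rounds toward negative infinity. On gen6, do the RNDD in the
+ * float type and convert to the destination's type with a separate
+ * MOV instead of relying on RNDD to convert.
+ */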
+ if (intel->gen >= 6) {
+ struct brw_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);
+
+ brw_RNDD(p, dst_f, src);
+ brw_MOV(p, dst, dst_f);
+ } else {
+ brw_RNDD(p, dst, src);
+ }
+}
+
static void emit_math1_gen4(struct brw_vs_compile *c,
GLuint function,
struct brw_reg dst,
struct brw_reg arg0 )
{
struct brw_compile *p = &c->func;
- struct brw_instruction *if_insn;
struct brw_reg tmp = dst;
GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
* BRW_EXECUTE_1 for all comparisons.
*/
brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
- if_insn = brw_IF(p, BRW_EXECUTE_8);
+ brw_IF(p, BRW_EXECUTE_8);
{
brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));
brw_swizzle1(arg0, 3),
BRW_MATH_PRECISION_PARTIAL);
}
-
- brw_ENDIF(p, if_insn);
+ brw_ENDIF(p);
release_tmp(c, tmp);
}
brw_MAC(p, dst, arg0, arg1);
}
-/** 3 or 4-component vector normalization */
-static void emit_nrm( struct brw_vs_compile *c,
- struct brw_reg dst,
- struct brw_reg arg0,
- int num_comps)
-{
- struct brw_compile *p = &c->func;
- struct brw_reg tmp = get_tmp(c);
-
- /* tmp = dot(arg0, arg0) */
- if (num_comps == 3)
- brw_DP3(p, tmp, arg0, arg0);
- else
- brw_DP4(p, tmp, arg0, arg0);
-
- /* tmp = 1 / sqrt(tmp) */
- emit_math1(c, BRW_MATH_FUNCTION_RSQ, tmp, tmp, BRW_MATH_PRECISION_FULL);
-
- /* dst = arg0 * tmp */
- brw_MUL(p, dst, arg0, tmp);
-
- release_tmp(c, tmp);
-}
-
-
static struct brw_reg
get_constant(struct brw_vs_compile *c,
const struct prog_instruction *inst,
assert(argIndex < 3);
- assert(c->func.brw->intel.gen < 6); /* FINISHME */
-
if (c->current_const[argIndex].index != src->Index) {
/* Keep track of the last constant loaded in this slot, for reuse. */
c->current_const[argIndex].index = src->Index;
}
/* replicate lower four floats into upper half (to get XYZWXYZW) */
- const_reg = stride(const_reg, 0, 4, 0);
+ const_reg = stride(const_reg, 0, 4, 1);
const_reg.subnr = 0;
return const_reg;
{
const struct prog_src_register *src = &inst->SrcReg[argIndex];
struct brw_compile *p = &c->func;
+ struct brw_context *brw = p->brw;
+ struct intel_context *intel = &brw->intel;
struct brw_reg const_reg = c->current_const[argIndex].reg;
- struct brw_reg addrReg = c->regs[PROGRAM_ADDRESS][0];
- struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);
+ struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
+ uint32_t offset;
assert(argIndex < 3);
- assert(c->func.brw->intel.gen < 6); /* FINISHME */
-
/* Can't reuse a reladdr constant load. */
c->current_const[argIndex].index = -1;
src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
- brw_MUL(p, byte_addr_reg, addrReg, brw_imm_ud(16));
+ if (intel->gen >= 6) {
+ offset = src->Index;
+ } else {
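+ /* Pre-gen6, the read offset is in bytes: scale both the index in
+ * the address register and the constant offset by the size of a
+ * vec4 (16 bytes).
+ */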
+ struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);
+ brw_MUL(p, byte_addr_reg, addr_reg, brw_imm_d(16));
+ addr_reg = byte_addr_reg;
+ offset = 16 * src->Index;
+ }
/* fetch the first vec4 */
brw_dp_READ_4_vs_relative(p,
- const_reg, /* writeback dest */
- byte_addr_reg, /* address register */
- 16 * src->Index, /* byte offset */
- SURF_INDEX_VERT_CONST_BUFFER /* binding table index */
- );
+ const_reg,
+ addr_reg,
+ offset,
+ SURF_INDEX_VERT_CONST_BUFFER);
return const_reg;
}
if (component >= 0) {
params = c->vp->program.Base.Parameters;
- f = params->ParameterValues[src->Index][component];
+ f = params->ParameterValues[src->Index][component].f;
if (src->Abs)
f = fabs(f);
case PROGRAM_UNIFORM:
case PROGRAM_ENV_PARAM:
case PROGRAM_LOCAL_PARAM:
- if (c->vp->use_const_buffer) {
- if (!relAddr && c->constant_map[index] != -1) {
- assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
- return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
- } else if (relAddr)
+ if (!relAddr && c->constant_map[index] != -1) {
+ /* Take from the push constant buffer if possible. */
+ assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
+ return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
+ } else {
+ /* Must be in the pull constant buffer then. */
+ assert(c->vp->use_const_buffer);
+ if (relAddr)
return get_reladdr_constant(c, inst, argIndex);
else
return get_constant(c, inst, argIndex);
}
- else if (relAddr) {
- return deref(c, c->regs[PROGRAM_STATE_VAR][0], index, 16);
- }
- else {
- assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
- return c->regs[PROGRAM_STATE_VAR][index];
- }
case PROGRAM_ADDRESS:
assert(index == 0);
return c->regs[file][index];
GET_SWZ(src->Swizzle, 1),
GET_SWZ(src->Swizzle, 2),
GET_SWZ(src->Swizzle, 3));
- }
- /* Note this is ok for non-swizzle instructions:
- */
- reg.negate = src->Negate ? 1 : 0;
+ /* Note this is ok for non-swizzle ARB_vp instructions */
+ reg.negate = src->Negate ? 1 : 0;
+ }
return reg;
}
}
}
+static int
+align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
+{
+ struct intel_context *intel = &brw->intel;
+
+ if (intel->gen >= 6) {
+ /* URB data written (does not include the message header reg) must
+ * be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
+ * section 5.4.3.2.2: URB_INTERLEAVED.
+ *
+ * URB entries are allocated on a multiple of 1024 bits, so an
+ * extra 128 bits written here to make the end align to 256 is
+ * no problem.
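+ *
+ * For example, a message of one header reg plus five regs of URB
+ * data (mlen 6) is padded to mlen 7 so that an even six regs of
+ * data are written.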
+ */
+ if ((mlen % 2) != 1)
+ mlen++;
+ }
+
+ return mlen;
+}
/**
* Post-vertex-program processing. Send the results to the URB.
struct brw_reg ndc;
int eot;
GLuint len_vertex_header = 2;
- int next_mrf, i;
+ int i;
+ int msg_len;
+ int slot;
if (c->key.copy_edgeflag) {
brw_MOV(p,
else
m = brw_message_reg(4);
- brw_DP4(p, brw_writemask(m, (1 << (i & 7))),pos, c->userplane[i]);
+ brw_DP4(p, brw_writemask(m, (1 << (i & 3))), pos, c->userplane[i]);
}
}
} else if ((c->prog_data.outputs_written &
}
/* Move variable-addressed, non-overflow outputs to their MRFs. */
- next_mrf = 2 + len_vertex_header;
- for (i = 0; i < VERT_RESULT_MAX; i++) {
- if (c->first_overflow_output > 0 && i >= c->first_overflow_output)
- break;
- if (!(c->prog_data.outputs_written & BITFIELD64_BIT(i)))
- continue;
- if (i == VERT_RESULT_PSIZ)
- continue;
+ for (slot = len_vertex_header; slot < c->vue_map.num_slots; ++slot) {
+ if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE)
+ break;
- if (i >= VERT_RESULT_TEX0 &&
- c->regs[PROGRAM_OUTPUT][i].file == BRW_GENERAL_REGISTER_FILE) {
- brw_MOV(p, brw_message_reg(next_mrf), c->regs[PROGRAM_OUTPUT][i]);
- next_mrf++;
- } else if (c->regs[PROGRAM_OUTPUT][i].file == BRW_MESSAGE_REGISTER_FILE) {
- next_mrf = c->regs[PROGRAM_OUTPUT][i].nr + 1;
+ int mrf = slot + 1;
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ if (c->regs[PROGRAM_OUTPUT][vert_result].file ==
+ BRW_GENERAL_REGISTER_FILE) {
+ brw_MOV(p, brw_message_reg(mrf),
+ c->regs[PROGRAM_OUTPUT][vert_result]);
}
}
- eot = (c->first_overflow_output == 0);
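+ /* If every slot fits in this first write, it is also the last (EOT). */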
+ eot = (slot >= c->vue_map.num_slots);
+
+ /* Message header, plus the (first part of the) VUE. */
+ msg_len = 1 + slot;
+ msg_len = align_interleaved_urb_mlen(brw, msg_len);
+ /* Any outputs beyond BRW_MAX_MRF should be in the second URB write */
+ assert(msg_len <= BRW_MAX_MRF - 1);
brw_urb_WRITE(p,
brw_null_reg(), /* dest */
c->r0, /* src */
0, /* allocate */
1, /* used */
- MIN2(c->nr_outputs + 1 + len_vertex_header, (BRW_MAX_MRF-1)), /* msg len */
+ msg_len,
0, /* response len */
eot, /* eot */
eot, /* writes complete */
0, /* urb destination offset */
BRW_URB_SWIZZLE_INTERLEAVE);
- if (c->first_overflow_output > 0) {
+ if (slot < c->vue_map.num_slots) {
/* Not all of the vertex outputs/results fit into the MRF.
* Move the overflowed attributes from the GRF to the MRF and
* issue another brw_urb_WRITE().
*/
- GLuint i, mrf = 1;
- for (i = c->first_overflow_output; i < VERT_RESULT_MAX; i++) {
- if (c->prog_data.outputs_written & BITFIELD64_BIT(i)) {
- /* move from GRF to MRF */
- brw_MOV(p, brw_message_reg(mrf), c->regs[PROGRAM_OUTPUT][i]);
- mrf++;
- }
+ GLuint mrf = 1;
+ for (; slot < c->vue_map.num_slots; ++slot) {
+ int vert_result = c->vue_map.slot_to_vert_result[slot];
+ /* move from GRF to MRF */
+ brw_MOV(p, brw_message_reg(mrf),
+ c->regs[PROGRAM_OUTPUT][vert_result]);
+ mrf++;
}
brw_urb_WRITE(p,
c->r0, /* src */
0, /* allocate */
1, /* used */
- mrf, /* msg len */
+ align_interleaved_urb_mlen(brw, mrf),
0, /* response len */
1, /* eot */
1, /* writes complete */
- 14 / 2, /* urb destination offset */
+ MAX_SLOTS_IN_FIRST_URB_WRITE / 2, /* urb destination offset */
BRW_URB_SWIZZLE_INTERLEAVE);
}
}
if (val.address_mode != BRW_ADDRESS_DIRECT)
return GL_FALSE;
+ if (val.negate || val.abs)
+ return GL_FALSE;
+
switch (prev_insn->header.opcode) {
case BRW_OPCODE_MOV:
case BRW_OPCODE_MAC:
}
}
+static void
+brw_vs_rescale_gl_fixed(struct brw_vs_compile *c)
+{
+ struct brw_compile *p = &c->func;
+ int i;
+
+ for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+ if (!(c->prog_data.inputs_read & (1 << i)))
+ continue;
+
+ if (c->key.gl_fixed_input_size[i] != 0) {
+ struct brw_reg reg = c->regs[PROGRAM_INPUT][i];
+
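+ /* GL_FIXED is a 16.16 fixed-point format; the fetched components
+ * are rescaled here by 1/(1 << 16) to recover the real values.
+ */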
+ brw_MUL(p,
+ brw_writemask(reg, (1 << c->key.gl_fixed_input_size[i]) - 1),
+ reg, brw_imm_f(1.0 / 65536.0));
+ }
+ }
+}
+
/* Emit the vertex program instructions here.
*/
-void brw_vs_emit(struct brw_vs_compile *c )
+void brw_old_vs_emit(struct brw_vs_compile *c )
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
const GLuint nr_insns = c->vp->program.Base.NumInstructions;
- GLuint insn, if_depth = 0, loop_depth = 0;
- struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH] = { 0 };
+ GLuint insn, loop_depth = 0;
+ struct brw_instruction *loop_inst[MAX_LOOP_DEPTH] = { 0 };
int if_depth_in_loop[MAX_LOOP_DEPTH];
const struct brw_indirect stack_index = brw_indirect(0, 0);
GLuint index;
*/
brw_vs_alloc_regs(c);
+ brw_vs_rescale_gl_fixed(c);
+
if (c->needs_stack)
brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));
const struct prog_src_register *src = &inst->SrcReg[i];
index = src->Index;
file = src->File;
- if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
- args[i] = c->output_regs[index].reg;
- else
+ if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src) {
+ /* Can't just make get_arg "do the right thing" here because
+ * other callers of get_arg and get_src_reg don't expect any
+ * special behavior for the c->output_regs[index].used_in_src
+ * case.
+ */
+ args[i] = c->output_regs[index].reg;
+ args[i].dw1.bits.swizzle =
+ BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
+ GET_SWZ(src->Swizzle, 1),
+ GET_SWZ(src->Swizzle, 2),
+ GET_SWZ(src->Swizzle, 3));
+
+ /* Note this is ok for non-swizzle ARB_vp instructions */
+ args[i].negate = src->Negate ? 1 : 0;
+ } else
args[i] = get_arg(c, inst, i);
}
index = inst->DstReg.Index;
file = inst->DstReg.File;
if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
- dst = c->output_regs[index].reg;
+ /* Can't just make get_dst "do the right thing" here because other
+ * callers of get_dst don't expect any special behavior for the
+ * c->output_regs[index].used_in_src case.
+ */
+ dst = brw_writemask(c->output_regs[index].reg, inst->DstReg.WriteMask);
else
dst = get_dst(c, inst->DstReg);
switch (inst->Opcode) {
case OPCODE_ABS:
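+ /* Hardware applies abs before negate, so clear any negate flag
+ * picked up from the source to get |x| rather than -|x|.
+ */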
+ args[0].negate = false;
brw_MOV(p, dst, brw_abs(args[0]));
break;
case OPCODE_ADD:
case OPCODE_DPH:
brw_DPH(p, dst, args[0], args[1]);
break;
- case OPCODE_NRM3:
- emit_nrm(c, dst, args[0], 3);
- break;
- case OPCODE_NRM4:
- emit_nrm(c, dst, args[0], 4);
- break;
case OPCODE_DST:
unalias2(c, dst, args[0], args[1], emit_dst_noalias);
break;
emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
break;
case OPCODE_ARL:
- brw_RNDD(p, dst, args[0]);
+ emit_arl(p, dst, args[0]);
break;
case OPCODE_FLR:
brw_RNDD(p, dst, args[0]);
case OPCODE_XPD:
emit_xpd(p, dst, args[0], args[1]);
break;
- case OPCODE_IF:
- assert(if_depth < MAX_IF_DEPTH);
- if_inst[if_depth] = brw_IF(p, BRW_EXECUTE_8);
+ case OPCODE_IF: {
+ struct brw_instruction *if_inst = brw_IF(p, BRW_EXECUTE_8);
/* Note that brw_IF smashes the predicate_control field. */
- if_inst[if_depth]->header.predicate_control = get_predicate(inst);
+ if_inst->header.predicate_control = get_predicate(inst);
if_depth_in_loop[loop_depth]++;
- if_depth++;
break;
+ }
case OPCODE_ELSE:
clear_current_const(c);
- assert(if_depth > 0);
- if_inst[if_depth-1] = brw_ELSE(p, if_inst[if_depth-1]);
+ brw_ELSE(p);
break;
case OPCODE_ENDIF:
clear_current_const(c);
- assert(if_depth > 0);
- brw_ENDIF(p, if_inst[--if_depth]);
+ brw_ENDIF(p);
if_depth_in_loop[loop_depth]--;
break;
case OPCODE_BGNLOOP:
case OPCODE_CONT:
brw_set_predicate_control(p, get_predicate(inst));
if (intel->gen >= 6) {
- brw_CONT_gen6(p, loop_inst[loop_depth - 1]);
+ gen6_CONT(p, loop_inst[loop_depth - 1]);
} else {
brw_CONT(p, if_depth_in_loop[loop_depth]);
}
* instructions. Instead, we directly modify the header
* of the last (already stored) instruction.
*/
- if (inst->DstReg.File == PROGRAM_OUTPUT) {
+ if (inst->DstReg.File == PROGRAM_OUTPUT &&
+ c->key.clamp_vertex_color) {
if ((inst->DstReg.Index == VERT_RESULT_COL0)
|| (inst->DstReg.Index == VERT_RESULT_COL1)
|| (inst->DstReg.Index == VERT_RESULT_BFC0)