#define ATTR_SIZE (4*4)
/**
- * True if the given vert_result is one of the outputs of the vertex shader.
+ * True if the given varying is one of the outputs of the vertex shader.
*/
-static inline bool brw_clip_have_vert_result(struct brw_clip_compile *c,
- GLuint vert_result)
+static inline bool brw_clip_have_varying(struct brw_clip_compile *c,
+ GLuint varying)
{
- return (c->key.attrs & BITFIELD64_BIT(vert_result)) ? 1 : 0;
+ return (c->key.attrs & BITFIELD64_BIT(varying)) ? 1 : 0;
}
/* Points are only culled, so no need for a clip routine, however it
struct brw_indirect newvtx1 = brw_indirect(3, 0);
struct brw_indirect plane_ptr = brw_indirect(4, 0);
struct brw_reg v1_null_ud = retype(vec1(brw_null_reg()), BRW_REGISTER_TYPE_UD);
- GLuint hpos_offset = brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_POS);
+ GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
brw_MOV(p, get_addr_reg(vtx0), brw_address(c->reg.vertex[0]));
brw_MOV(p, get_addr_reg(vtx1), brw_address(c->reg.vertex[1]));
struct brw_indirect inlist_ptr = brw_indirect(4, 0);
struct brw_indirect outlist_ptr = brw_indirect(5, 0);
struct brw_indirect freelist_ptr = brw_indirect(6, 0);
- GLuint hpos_offset = brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_POS);
+ GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
brw_MOV(p, get_addr_reg(vtxPrev), brw_address(c->reg.vertex[2]) );
brw_MOV(p, get_addr_reg(plane_ptr), brw_clip_plane0_address(c));
struct brw_compile *p = &c->func;
struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */
- GLuint hpos_offset = brw_vert_result_to_offset(&c->vue_map,
+ GLuint hpos_offset = brw_varying_to_offset(&c->vue_map,
VARYING_SLOT_POS);
brw_MOV(p, get_addr_reg(vt0), brw_address(c->reg.vertex[0]));
struct brw_compile *p = &c->func;
struct brw_reg e = c->reg.tmp0;
struct brw_reg f = c->reg.tmp1;
- GLuint hpos_offset = brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_POS);
+ GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
struct brw_reg v0 = byte_offset(c->reg.vertex[0], hpos_offset);
struct brw_reg v1 = byte_offset(c->reg.vertex[1], hpos_offset);
struct brw_reg v2 = byte_offset(c->reg.vertex[2], hpos_offset);
/* Do we have any colors to copy?
*/
- if (!(brw_clip_have_vert_result(c, VARYING_SLOT_COL0) &&
- brw_clip_have_vert_result(c, VARYING_SLOT_BFC0)) &&
- !(brw_clip_have_vert_result(c, VARYING_SLOT_COL1) &&
- brw_clip_have_vert_result(c, VARYING_SLOT_BFC1)))
+ if (!(brw_clip_have_varying(c, VARYING_SLOT_COL0) &&
+ brw_clip_have_varying(c, VARYING_SLOT_BFC0)) &&
+ !(brw_clip_have_varying(c, VARYING_SLOT_COL1) &&
+ brw_clip_have_varying(c, VARYING_SLOT_BFC1)))
return;
/* In some wierd degnerate cases we can end up testing the
GLuint i;
for (i = 0; i < 3; i++) {
- if (brw_clip_have_vert_result(c, VARYING_SLOT_COL0) &&
- brw_clip_have_vert_result(c, VARYING_SLOT_BFC0))
+ if (brw_clip_have_varying(c, VARYING_SLOT_COL0) &&
+ brw_clip_have_varying(c, VARYING_SLOT_BFC0))
brw_MOV(p,
byte_offset(c->reg.vertex[i],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL0)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL0)),
byte_offset(c->reg.vertex[i],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC0)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC0)));
- if (brw_clip_have_vert_result(c, VARYING_SLOT_COL1) &&
- brw_clip_have_vert_result(c, VARYING_SLOT_BFC1))
+ if (brw_clip_have_varying(c, VARYING_SLOT_COL1) &&
+ brw_clip_have_varying(c, VARYING_SLOT_BFC1))
brw_MOV(p,
byte_offset(c->reg.vertex[i],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL1)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL1)),
byte_offset(c->reg.vertex[i],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC1)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC1)));
}
}
brw_ENDIF(p);
brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<8));
brw_MOV(p, byte_offset(c->reg.vertex[0],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_EDGE)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_EDGE)),
brw_imm_f(0));
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<9));
brw_MOV(p, byte_offset(c->reg.vertex[2],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_EDGE)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_EDGE)),
brw_imm_f(0));
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
struct brw_indirect vert )
{
struct brw_compile *p = &c->func;
- GLuint ndc_offset = brw_vert_result_to_offset(&c->vue_map,
- BRW_VARYING_SLOT_NDC);
+ GLuint ndc_offset = brw_varying_to_offset(&c->vue_map,
+ BRW_VARYING_SLOT_NDC);
struct brw_reg z = deref_1f(vert, ndc_offset +
2 * type_sz(BRW_REGISTER_TYPE_F));
/* draw edge if edgeflag != 0 */
brw_CMP(p,
vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
- deref_1f(v0, brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_EDGE)),
+ deref_1f(v0, brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_EDGE)),
brw_imm_f(0));
brw_IF(p, BRW_EXECUTE_1);
{
*/
brw_CMP(p,
vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
- deref_1f(v0, brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_EDGE)),
+ deref_1f(v0, brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_EDGE)),
brw_imm_f(0));
brw_IF(p, BRW_EXECUTE_1);
{
brw_clip_tri_init_vertices(c);
brw_clip_init_ff_sync(c);
- assert(brw_clip_have_vert_result(c, VARYING_SLOT_EDGE));
+ assert(brw_clip_have_varying(c, VARYING_SLOT_EDGE));
if (c->key.fill_ccw == CLIP_CULL &&
c->key.fill_cw == CLIP_CULL) {
{
struct brw_compile *p = &c->func;
struct brw_reg tmp = get_tmp(c);
- GLuint hpos_offset = brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_POS);
- GLuint ndc_offset = brw_vert_result_to_offset(&c->vue_map,
- BRW_VARYING_SLOT_NDC);
+ GLuint hpos_offset = brw_varying_to_offset(&c->vue_map, VARYING_SLOT_POS);
+ GLuint ndc_offset = brw_varying_to_offset(&c->vue_map,
+ BRW_VARYING_SLOT_NDC);
/* Fixup position. Extract from the original vertex and re-project
* to screen space:
/* Iterate over each attribute (could be done in pairs?)
*/
for (slot = 0; slot < c->vue_map.num_slots; slot++) {
- int vert_result = c->vue_map.slot_to_vert_result[slot];
+ int varying = c->vue_map.slot_to_varying[slot];
GLuint delta = brw_vue_slot_to_offset(slot);
- if (vert_result == VARYING_SLOT_EDGE) {
+ if (varying == VARYING_SLOT_EDGE) {
if (force_edgeflag)
brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
else
brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
- } else if (vert_result == VARYING_SLOT_PSIZ ||
- vert_result == VARYING_SLOT_CLIP_DIST0 ||
- vert_result == VARYING_SLOT_CLIP_DIST1) {
+ } else if (varying == VARYING_SLOT_PSIZ ||
+ varying == VARYING_SLOT_CLIP_DIST0 ||
+ varying == VARYING_SLOT_CLIP_DIST1) {
/* PSIZ doesn't need interpolation because it isn't used by the
* fragment shader. CLIP_DIST0 and CLIP_DIST1 don't need
* intepolation because on pre-GEN6, these are just placeholder VUE
* slots that don't perform any action.
*/
- } else if (vert_result < VARYING_SLOT_MAX) {
+ } else if (varying < VARYING_SLOT_MAX) {
/* This is a true vertex result (and not a special value for the VUE
* header), so interpolate:
*
{
struct brw_compile *p = &c->func;
- if (brw_clip_have_vert_result(c, VARYING_SLOT_COL0))
+ if (brw_clip_have_varying(c, VARYING_SLOT_COL0))
brw_MOV(p,
byte_offset(c->reg.vertex[to],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL0)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL0)),
byte_offset(c->reg.vertex[from],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL0)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL0)));
- if (brw_clip_have_vert_result(c, VARYING_SLOT_COL1))
+ if (brw_clip_have_varying(c, VARYING_SLOT_COL1))
brw_MOV(p,
byte_offset(c->reg.vertex[to],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL1)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL1)),
byte_offset(c->reg.vertex[from],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_COL1)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_COL1)));
- if (brw_clip_have_vert_result(c, VARYING_SLOT_BFC0))
+ if (brw_clip_have_varying(c, VARYING_SLOT_BFC0))
brw_MOV(p,
byte_offset(c->reg.vertex[to],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC0)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC0)),
byte_offset(c->reg.vertex[from],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC0)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC0)));
- if (brw_clip_have_vert_result(c, VARYING_SLOT_BFC1))
+ if (brw_clip_have_varying(c, VARYING_SLOT_BFC1))
brw_MOV(p,
byte_offset(c->reg.vertex[to],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC1)),
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC1)),
byte_offset(c->reg.vertex[from],
- brw_vert_result_to_offset(&c->vue_map,
- VARYING_SLOT_BFC1)));
+ brw_varying_to_offset(&c->vue_map,
+ VARYING_SLOT_BFC1)));
}
BRW_VARYING_SLOT_NDC = VARYING_SLOT_MAX,
BRW_VARYING_SLOT_POS_DUPLICATE,
BRW_VARYING_SLOT_PAD,
- /*
- * It's actually not a vert_result but just a _mark_ to let sf aware that
- * he need do something special to handle gl_PointCoord builtin variable
- * correctly. see compile_sf_prog() for more info.
+ /**
+ * Technically this is not a varying but just a placeholder that
+ * compile_sf_prog() inserts into its VUE map to cause the gl_PointCoord
+ * built-in variable to be compiled correctly. See compile_sf_prog() for
+ * more info.
*/
BRW_VARYING_SLOT_PNTC,
BRW_VARYING_SLOT_MAX
* additional processing is applied before storing them in the VUE), the
* value is -1.
*/
- int vert_result_to_slot[BRW_VARYING_SLOT_MAX];
+ int varying_to_slot[BRW_VARYING_SLOT_MAX];
/**
* Map from VUE slot to gl_varying_slot value. For slots that do not
* brw_varying_slot.
*
* For slots that are not in use, the value is BRW_VARYING_SLOT_MAX (this
- * simplifies code that uses the value stored in slot_to_vert_result to
+ * simplifies code that uses the value stored in slot_to_varying to
* create a bit mask).
*/
- int slot_to_vert_result[BRW_VARYING_SLOT_MAX];
+ int slot_to_varying[BRW_VARYING_SLOT_MAX];
/**
* Total number of VUE slots in use
* Convert a vertex output (brw_varying_slot) into a byte offset within the
* VUE.
*/
-static inline GLuint brw_vert_result_to_offset(struct brw_vue_map *vue_map,
- GLuint vert_result)
+static inline GLuint brw_varying_to_offset(struct brw_vue_map *vue_map,
+ GLuint varying)
{
- return brw_vue_slot_to_offset(vue_map->vert_result_to_slot[vert_result]);
+ return brw_vue_slot_to_offset(vue_map->varying_to_slot[varying]);
}
for (binding = 0; binding < key->num_transform_feedback_bindings;
++binding) {
- unsigned char vert_result =
+ unsigned char varying =
key->transform_feedback_bindings[binding];
- unsigned char slot = c->vue_map.vert_result_to_slot[vert_result];
+ unsigned char slot = c->vue_map.varying_to_slot[varying];
/* From the Sandybridge PRM, Volume 2, Part 1, Section 4.5.1:
*
* "Prior to End of Thread with a URB_WRITE, the kernel must
vertex_slot.nr += slot / 2;
vertex_slot.subnr = (slot % 2) * 16;
/* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
- vertex_slot.dw1.bits.swizzle = vert_result == VARYING_SLOT_PSIZ
+ vertex_slot.dw1.bits.swizzle = varying == VARYING_SLOT_PSIZ
? BRW_SWIZZLE_WWWW : key->transform_feedback_swizzles[binding];
brw_set_access_mode(p, BRW_ALIGN_16);
brw_MOV(p, stride(c->reg.header, 4, 4, 1),
* it manually to let SF shader generate the needed interpolation
* coefficient for FS shader.
*/
- c.vue_map.vert_result_to_slot[BRW_VARYING_SLOT_PNTC] = c.vue_map.num_slots;
- c.vue_map.slot_to_vert_result[c.vue_map.num_slots++] = BRW_VARYING_SLOT_PNTC;
+ c.vue_map.varying_to_slot[BRW_VARYING_SLOT_PNTC] = c.vue_map.num_slots;
+ c.vue_map.slot_to_varying[c.vue_map.num_slots++] = BRW_VARYING_SLOT_PNTC;
}
c.urb_entry_read_offset = brw_sf_compute_urb_entry_read_offset(intel);
c.nr_attr_regs = (c.vue_map.num_slots + 1)/2 - c.urb_entry_read_offset;
/**
- * Determine the vert_result corresponding to the given half of the given
+ * Determine the varying corresponding to the given half of the given
* register. half=0 means the first half of a register, half=1 means the
* second half.
*/
-static inline int vert_reg_to_vert_result(struct brw_sf_compile *c, GLuint reg,
- int half)
+static inline int vert_reg_to_varying(struct brw_sf_compile *c, GLuint reg,
+ int half)
{
int vue_slot = (reg + c->urb_entry_read_offset) * 2 + half;
- return c->vue_map.slot_to_vert_result[vue_slot];
+ return c->vue_map.slot_to_varying[vue_slot];
}
/**
- * Determine the register corresponding to the given vert_result.
+ * Determine the register corresponding to the given varying.
*/
-static struct brw_reg get_vert_result(struct brw_sf_compile *c,
- struct brw_reg vert,
- GLuint vert_result)
+static struct brw_reg get_varying(struct brw_sf_compile *c,
+ struct brw_reg vert,
+ GLuint varying)
{
- int vue_slot = c->vue_map.vert_result_to_slot[vert_result];
+ int vue_slot = c->vue_map.varying_to_slot[varying];
assert (vue_slot >= c->urb_entry_read_offset);
GLuint off = vue_slot / 2 - c->urb_entry_read_offset;
GLuint sub = vue_slot % 2;
if (have_attr(c, VARYING_SLOT_COL0+i) &&
have_attr(c, VARYING_SLOT_BFC0+i))
brw_MOV(p,
- get_vert_result(c, vert, VARYING_SLOT_COL0+i),
- get_vert_result(c, vert, VARYING_SLOT_BFC0+i));
+ get_varying(c, vert, VARYING_SLOT_COL0+i),
+ get_varying(c, vert, VARYING_SLOT_BFC0+i));
}
}
for (i = VARYING_SLOT_COL0; i <= VARYING_SLOT_COL1; i++) {
if (have_attr(c,i))
brw_MOV(p,
- get_vert_result(c, dst, i),
- get_vert_result(c, src, i));
+ get_varying(c, dst, i),
+ get_varying(c, src, i));
}
}
*pc_linear = 0;
*pc = 0xf;
- if (persp_mask & BITFIELD64_BIT(vert_reg_to_vert_result(c, reg, 0)))
+ if (persp_mask & BITFIELD64_BIT(vert_reg_to_varying(c, reg, 0)))
*pc_persp = 0xf;
- if (linear_mask & BITFIELD64_BIT(vert_reg_to_vert_result(c, reg, 0)))
+ if (linear_mask & BITFIELD64_BIT(vert_reg_to_varying(c, reg, 0)))
*pc_linear = 0xf;
/* Maybe only processs one attribute on the final round:
*/
- if (vert_reg_to_vert_result(c, reg, 1) != BRW_VARYING_SLOT_MAX) {
+ if (vert_reg_to_varying(c, reg, 1) != BRW_VARYING_SLOT_MAX) {
*pc |= 0xf0;
- if (persp_mask & BITFIELD64_BIT(vert_reg_to_vert_result(c, reg, 1)))
+ if (persp_mask & BITFIELD64_BIT(vert_reg_to_varying(c, reg, 1)))
*pc_persp |= 0xf0;
- if (linear_mask & BITFIELD64_BIT(vert_reg_to_vert_result(c, reg, 1)))
+ if (linear_mask & BITFIELD64_BIT(vert_reg_to_varying(c, reg, 1)))
*pc_linear |= 0xf0;
}
static uint16_t
calculate_point_sprite_mask(struct brw_sf_compile *c, GLuint reg)
{
- int vert_result1, vert_result2;
+ int varying1, varying2;
uint16_t pc = 0;
- vert_result1 = vert_reg_to_vert_result(c, reg, 0);
- if (vert_result1 >= VARYING_SLOT_TEX0 && vert_result1 <= VARYING_SLOT_TEX7) {
- if (c->key.point_sprite_coord_replace & (1 << (vert_result1 - VARYING_SLOT_TEX0)))
+ varying1 = vert_reg_to_varying(c, reg, 0);
+ if (varying1 >= VARYING_SLOT_TEX0 && varying1 <= VARYING_SLOT_TEX7) {
+ if (c->key.point_sprite_coord_replace & (1 << (varying1 - VARYING_SLOT_TEX0)))
pc |= 0x0f;
}
- if (vert_result1 == BRW_VARYING_SLOT_PNTC)
+ if (varying1 == BRW_VARYING_SLOT_PNTC)
pc |= 0x0f;
- vert_result2 = vert_reg_to_vert_result(c, reg, 1);
- if (vert_result2 >= VARYING_SLOT_TEX0 && vert_result2 <= VARYING_SLOT_TEX7) {
- if (c->key.point_sprite_coord_replace & (1 << (vert_result2 -
+ varying2 = vert_reg_to_varying(c, reg, 1);
+ if (varying2 >= VARYING_SLOT_TEX0 && varying2 <= VARYING_SLOT_TEX7) {
+ if (c->key.point_sprite_coord_replace & (1 << (varying2 -
VARYING_SLOT_TEX0)))
pc |= 0xf0;
}
- if (vert_result2 == BRW_VARYING_SLOT_PNTC)
+ if (varying2 == BRW_VARYING_SLOT_PNTC)
pc |= 0xf0;
return pc;
void emit_ndc_computation();
void emit_psiz_and_flags(struct brw_reg reg);
void emit_clip_distances(struct brw_reg reg, int offset);
- void emit_generic_urb_slot(dst_reg reg, int vert_result);
- void emit_urb_slot(int mrf, int vert_result);
+ void emit_generic_urb_slot(dst_reg reg, int varying);
+ void emit_urb_slot(int mrf, int varying);
void emit_urb_writes(void);
void emit_shader_time_begin();
}
void
-vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result)
+vec4_visitor::emit_generic_urb_slot(dst_reg reg, int varying)
{
- assert (vert_result < VARYING_SLOT_MAX);
- reg.type = output_reg[vert_result].type;
- current_annotation = output_reg_annotation[vert_result];
+ assert (varying < VARYING_SLOT_MAX);
+ reg.type = output_reg[varying].type;
+ current_annotation = output_reg_annotation[varying];
/* Copy the register, saturating if necessary */
vec4_instruction *inst = emit(MOV(reg,
- src_reg(output_reg[vert_result])));
- if ((vert_result == VARYING_SLOT_COL0 ||
- vert_result == VARYING_SLOT_COL1 ||
- vert_result == VARYING_SLOT_BFC0 ||
- vert_result == VARYING_SLOT_BFC1) &&
+ src_reg(output_reg[varying])));
+ if ((varying == VARYING_SLOT_COL0 ||
+ varying == VARYING_SLOT_COL1 ||
+ varying == VARYING_SLOT_BFC0 ||
+ varying == VARYING_SLOT_BFC1) &&
c->key.clamp_vertex_color) {
inst->saturate = true;
}
}
void
-vec4_visitor::emit_urb_slot(int mrf, int vert_result)
+vec4_visitor::emit_urb_slot(int mrf, int varying)
{
struct brw_reg hw_reg = brw_message_reg(mrf);
dst_reg reg = dst_reg(MRF, mrf);
reg.type = BRW_REGISTER_TYPE_F;
- switch (vert_result) {
+ switch (varying) {
case VARYING_SLOT_PSIZ:
/* PSIZ is always in slot 0, and is coupled with other flags. */
current_annotation = "indices, point width, clip flags";
case VARYING_SLOT_CLIP_DIST0:
case VARYING_SLOT_CLIP_DIST1:
if (this->c->key.uses_clip_distance) {
- emit_generic_urb_slot(reg, vert_result);
+ emit_generic_urb_slot(reg, varying);
} else {
current_annotation = "user clip distances";
- emit_clip_distances(hw_reg, (vert_result - VARYING_SLOT_CLIP_DIST0) * 4);
+ emit_clip_distances(hw_reg, (varying - VARYING_SLOT_CLIP_DIST0) * 4);
}
break;
case VARYING_SLOT_EDGE:
/* No need to write to this slot */
break;
default:
- emit_generic_urb_slot(reg, vert_result);
+ emit_generic_urb_slot(reg, varying);
break;
}
}
/* Set up the VUE data for the first URB write */
int slot;
for (slot = 0; slot < c->prog_data.vue_map.num_slots; ++slot) {
- emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
+ emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_varying[slot]);
/* If this was max_usable_mrf, we can't fit anything more into this URB
* WRITE.
for (; slot < c->prog_data.vue_map.num_slots; ++slot) {
assert(mrf < max_usable_mrf);
- emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
+ emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_varying[slot]);
}
current_annotation = "URB write";
/* PROGRAM_OUTPUT */
for (int slot = 0; slot < c->prog_data.vue_map.num_slots; slot++) {
- int vert_result = c->prog_data.vue_map.slot_to_vert_result[slot];
- if (vert_result == VARYING_SLOT_PSIZ)
- output_reg[vert_result] = dst_reg(this, glsl_type::float_type);
+ int varying = c->prog_data.vue_map.slot_to_varying[slot];
+ if (varying == VARYING_SLOT_PSIZ)
+ output_reg[varying] = dst_reg(this, glsl_type::float_type);
else
- output_reg[vert_result] = dst_reg(this, glsl_type::vec4_type);
- assert(output_reg[vert_result].type == BRW_REGISTER_TYPE_F);
+ output_reg[varying] = dst_reg(this, glsl_type::vec4_type);
+ assert(output_reg[varying].type == BRW_REGISTER_TYPE_F);
}
/* PROGRAM_ADDRESS */
#include "glsl/ralloc.h"
static inline void assign_vue_slot(struct brw_vue_map *vue_map,
- int vert_result)
+ int varying)
{
- /* Make sure this vert_result hasn't been assigned a slot already */
- assert (vue_map->vert_result_to_slot[vert_result] == -1);
+ /* Make sure this varying hasn't been assigned a slot already */
+ assert (vue_map->varying_to_slot[varying] == -1);
- vue_map->vert_result_to_slot[vert_result] = vue_map->num_slots;
- vue_map->slot_to_vert_result[vue_map->num_slots++] = vert_result;
+ vue_map->varying_to_slot[varying] = vue_map->num_slots;
+ vue_map->slot_to_varying[vue_map->num_slots++] = varying;
}
/**
vue_map->num_slots = 0;
for (i = 0; i < BRW_VARYING_SLOT_MAX; ++i) {
- vue_map->vert_result_to_slot[i] = -1;
- vue_map->slot_to_vert_result[i] = BRW_VARYING_SLOT_MAX;
+ vue_map->varying_to_slot[i] = -1;
+ vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_MAX;
}
/* VUE header: format depends on chip generation and whether clipping is
* dword 24-27 is the first vertex data we fill.
*
* Note: future pipeline stages expect 4D space position to be
- * contiguous with the other vert_results, so we make dword 24-27 a
+ * contiguous with the other varyings, so we make dword 24-27 a
* duplicate copy of the 4D space position.
*/
assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
if (intel->gen < 6 && i == VARYING_SLOT_CLIP_VERTEX)
continue;
if ((outputs_written & BITFIELD64_BIT(i)) &&
- vue_map->vert_result_to_slot[i] == -1) {
+ vue_map->varying_to_slot[i] == -1) {
assign_vue_slot(vue_map, i);
}
}
}
/* Find the VUE slot for this attribute. */
- int slot = vue_map->vert_result_to_slot[fs_attr];
+ int slot = vue_map->varying_to_slot[fs_attr];
/* If there was only a back color written but not front, use back
* as the color instead of undefined
*/
if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
- slot = vue_map->vert_result_to_slot[VARYING_SLOT_BFC0];
+ slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
- slot = vue_map->vert_result_to_slot[VARYING_SLOT_BFC1];
+ slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
if (slot == -1) {
/* This attribute does not exist in the VUE--that means that the vertex
* do back-facing swizzling.
*/
bool swizzling = two_side_color &&
- ((vue_map->slot_to_vert_result[slot] == VARYING_SLOT_COL0 &&
- vue_map->slot_to_vert_result[slot+1] == VARYING_SLOT_BFC0) ||
- (vue_map->slot_to_vert_result[slot] == VARYING_SLOT_COL1 &&
- vue_map->slot_to_vert_result[slot+1] == VARYING_SLOT_BFC1));
+ ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
+ vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
+ (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
+ vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1));
/* Update max_source_attr. If swizzling, the SF will read this slot + 1. */
if (*max_source_attr < source_attr + swizzling)
for (i = 0; i < linked_xfb_info->NumOutputs; i++) {
int buffer = linked_xfb_info->Outputs[i].OutputBuffer;
uint16_t decl = 0;
- int vert_result = linked_xfb_info->Outputs[i].OutputRegister;
+ int varying = linked_xfb_info->Outputs[i].OutputRegister;
unsigned component_mask =
(1 << linked_xfb_info->Outputs[i].NumComponents) - 1;
/* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
- if (vert_result == VARYING_SLOT_PSIZ) {
+ if (varying == VARYING_SLOT_PSIZ) {
assert(linked_xfb_info->Outputs[i].NumComponents == 1);
component_mask <<= 3;
} else {
buffer_mask |= 1 << buffer;
decl |= buffer << SO_DECL_OUTPUT_BUFFER_SLOT_SHIFT;
- decl |= vue_map->vert_result_to_slot[vert_result] <<
+ decl |= vue_map->varying_to_slot[varying] <<
SO_DECL_REGISTER_INDEX_SHIFT;
decl |= component_mask << SO_DECL_COMPONENT_MASK_SHIFT;