#include "glsl/ralloc.h"
static inline void assign_vue_slot(struct brw_vue_map *vue_map,
- int vert_result)
+ int varying)
{
- /* Make sure this vert_result hasn't been assigned a slot already */
- assert (vue_map->vert_result_to_slot[vert_result] == -1);
+ /* Make sure this varying hasn't been assigned a slot already */
+ assert (vue_map->varying_to_slot[varying] == -1);
- vue_map->vert_result_to_slot[vert_result] = vue_map->num_slots;
- vue_map->slot_to_vert_result[vue_map->num_slots++] = vert_result;
+ vue_map->varying_to_slot[varying] = vue_map->num_slots;
+ vue_map->slot_to_varying[vue_map->num_slots++] = varying;
}
/**
* (generated by CACHE_NEW_VS_PROG).
*/
static void
-brw_compute_vue_map(struct brw_vs_compile *c)
+brw_compute_vue_map(struct brw_context *brw, struct brw_vs_compile *c)
{
- struct brw_context *brw = c->func.brw;
const struct intel_context *intel = &brw->intel;
struct brw_vue_map *vue_map = &c->prog_data.vue_map;
GLbitfield64 outputs_written = c->prog_data.outputs_written;
int i;
vue_map->num_slots = 0;
- for (i = 0; i < BRW_VERT_RESULT_MAX; ++i) {
- vue_map->vert_result_to_slot[i] = -1;
- vue_map->slot_to_vert_result[i] = BRW_VERT_RESULT_MAX;
+ for (i = 0; i < BRW_VARYING_SLOT_MAX; ++i) {
+ vue_map->varying_to_slot[i] = -1;
+ vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_MAX;
}
/* VUE header: format depends on chip generation and whether clipping is
* dword 4-7 is ndc position
* dword 8-11 is the first vertex data.
*/
- assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
- assign_vue_slot(vue_map, BRW_VERT_RESULT_NDC);
- assign_vue_slot(vue_map, VERT_RESULT_HPOS);
+ assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
+ assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
+ assign_vue_slot(vue_map, VARYING_SLOT_POS);
break;
case 5:
/* There are 20 DWs (D0-D19) in VUE header on Ironlake:
* dword 24-27 is the first vertex data we fill.
*
* Note: future pipeline stages expect 4D space position to be
- * contiguous with the other vert_results, so we make dword 24-27 a
+ * contiguous with the other varyings, so we make dword 24-27 a
* duplicate copy of the 4D space position.
*/
- assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
- assign_vue_slot(vue_map, BRW_VERT_RESULT_NDC);
- assign_vue_slot(vue_map, BRW_VERT_RESULT_HPOS_DUPLICATE);
- assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST0);
- assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST1);
- assign_vue_slot(vue_map, BRW_VERT_RESULT_PAD);
- assign_vue_slot(vue_map, VERT_RESULT_HPOS);
+ assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
+ assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
+ assign_vue_slot(vue_map, BRW_VARYING_SLOT_POS_DUPLICATE);
+ assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
+ assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);
+ assign_vue_slot(vue_map, BRW_VARYING_SLOT_PAD);
+ assign_vue_slot(vue_map, VARYING_SLOT_POS);
break;
case 6:
case 7:
* enabled.
* dword 8-11 or 16-19 is the first vertex element data we fill.
*/
- assign_vue_slot(vue_map, VERT_RESULT_PSIZ);
- assign_vue_slot(vue_map, VERT_RESULT_HPOS);
+ assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
+ assign_vue_slot(vue_map, VARYING_SLOT_POS);
if (c->key.userclip_active) {
- assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST0);
- assign_vue_slot(vue_map, VERT_RESULT_CLIP_DIST1);
+ assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
+ assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);
}
/* front and back colors need to be consecutive so that we can use
* ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
* two-sided color.
*/
- if (outputs_written & BITFIELD64_BIT(VERT_RESULT_COL0))
- assign_vue_slot(vue_map, VERT_RESULT_COL0);
- if (outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC0))
- assign_vue_slot(vue_map, VERT_RESULT_BFC0);
- if (outputs_written & BITFIELD64_BIT(VERT_RESULT_COL1))
- assign_vue_slot(vue_map, VERT_RESULT_COL1);
- if (outputs_written & BITFIELD64_BIT(VERT_RESULT_BFC1))
- assign_vue_slot(vue_map, VERT_RESULT_BFC1);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_COL0))
+ assign_vue_slot(vue_map, VARYING_SLOT_COL0);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
+ assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_COL1))
+ assign_vue_slot(vue_map, VARYING_SLOT_COL1);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
+ assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
break;
default:
assert (!"VUE map not known for this chip generation");
* assign them contiguously. Don't reassign outputs that already have a
* slot.
*
- * Also, prior to Gen6, don't assign a slot for VERT_RESULT_CLIP_VERTEX,
- * since it is unsupported. In Gen6 and above, VERT_RESULT_CLIP_VERTEX may
+ * Also, prior to Gen6, don't assign a slot for VARYING_SLOT_CLIP_VERTEX,
+ * since it is unsupported. In Gen6 and above, VARYING_SLOT_CLIP_VERTEX may
* be needed for transform feedback; since we don't want to have to
* recompute the VUE map (and everything that depends on it) when transform
* feedback is enabled or disabled, just go ahead and assign a slot for it.
*/
- for (int i = 0; i < VERT_RESULT_MAX; ++i) {
- if (intel->gen < 6 && i == VERT_RESULT_CLIP_VERTEX)
+ for (int i = 0; i < VARYING_SLOT_MAX; ++i) {
+ if (intel->gen < 6 && i == VARYING_SLOT_CLIP_VERTEX)
continue;
if ((outputs_written & BITFIELD64_BIT(i)) &&
- vue_map->vert_result_to_slot[i] == -1) {
+ vue_map->varying_to_slot[i] == -1) {
assign_vue_slot(vue_map, i);
}
}
struct brw_vertex_program *vp,
struct brw_vs_prog_key *key)
{
- struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
GLuint program_size;
const GLuint *program;
struct brw_vs_compile c;
void *mem_ctx;
- int aux_size;
int i;
struct gl_shader *vs = NULL;
mem_ctx = ralloc_context(NULL);
- brw_init_compile(brw, &c.func, mem_ctx);
c.vp = vp;
/* Allocate the references to the uniforms that will end up in the
c.prog_data.inputs_read = vp->program.Base.InputsRead;
if (c.key.copy_edgeflag) {
- c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_EDGE);
+ c.prog_data.outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
c.prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
}
- /* Put dummy slots into the VUE for the SF to put the replaced
- * point sprite coords in. We shouldn't need these dummy slots,
- * which take up precious URB space, but it would mean that the SF
- * doesn't get nice aligned pairs of input coords into output
- * coords, which would be a pain to handle.
- */
- for (i = 0; i < 8; i++) {
- if (c.key.point_coord_replace & (1 << i))
- c.prog_data.outputs_written |= BITFIELD64_BIT(VERT_RESULT_TEX0 + i);
+ if (intel->gen < 6) {
+ /* Put dummy slots into the VUE for the SF to put the replaced
+ * point sprite coords in. We shouldn't need these dummy slots,
+ * which take up precious URB space, but it would mean that the SF
+ * doesn't get nice aligned pairs of input coords into output
+ * coords, which would be a pain to handle.
+ */
+ for (i = 0; i < 8; i++) {
+ if (c.key.point_coord_replace & (1 << i))
+ c.prog_data.outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
+ }
}
- brw_compute_vue_map(&c);
+ brw_compute_vue_map(brw, &c);
if (0) {
_mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
/* Emit GEN4 code.
*/
- if (!brw_vs_emit(prog, &c)) {
+ program = brw_vs_emit(brw, prog, &c, mem_ctx, &program_size);
+ if (program == NULL) {
ralloc_free(mem_ctx);
return false;
}
c.prog_data.total_scratch * brw->max_vs_threads);
}
- /* get the program
- */
- program = brw_get_program(&c.func, &program_size);
-
brw_upload_cache(&brw->cache, BRW_VS_PROG,
&c.key, sizeof(c.key),
program, program_size,
}
static bool
-key_debug(const char *name, int a, int b)
+key_debug(struct intel_context *intel, const char *name, int a, int b)
{
if (a != b) {
perf_debug(" %s %d->%d\n", name, a, b);
struct gl_shader_program *prog,
const struct brw_vs_prog_key *key)
{
+ struct intel_context *intel = &brw->intel;
struct brw_cache_item *c = NULL;
const struct brw_vs_prog_key *old_key = NULL;
bool found = false;
}
for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
- found |= key_debug("GL_FIXED rescaling",
- old_key->gl_fixed_input_size[i],
- key->gl_fixed_input_size[i]);
+ found |= key_debug(intel, "Vertex attrib w/a flags",
+ old_key->gl_attrib_wa_flags[i],
+ key->gl_attrib_wa_flags[i]);
}
- found |= key_debug("user clip flags",
+ found |= key_debug(intel, "user clip flags",
old_key->userclip_active, key->userclip_active);
- found |= key_debug("user clipping planes as push constants",
+ found |= key_debug(intel, "user clipping planes as push constants",
old_key->nr_userclip_plane_consts,
key->nr_userclip_plane_consts);
- found |= key_debug("clip distance enable",
+ found |= key_debug(intel, "clip distance enable",
old_key->uses_clip_distance, key->uses_clip_distance);
- found |= key_debug("clip plane enable bitfield",
+ found |= key_debug(intel, "clip plane enable bitfield",
old_key->userclip_planes_enabled_gen_4_5,
key->userclip_planes_enabled_gen_4_5);
- found |= key_debug("copy edgeflag",
+ found |= key_debug(intel, "copy edgeflag",
old_key->copy_edgeflag, key->copy_edgeflag);
- found |= key_debug("PointCoord replace",
+ found |= key_debug(intel, "PointCoord replace",
old_key->point_coord_replace, key->point_coord_replace);
- found |= key_debug("vertex color clamping",
+ found |= key_debug(intel, "vertex color clamping",
old_key->clamp_vertex_color, key->clamp_vertex_color);
- found |= brw_debug_recompile_sampler_key(&old_key->tex, &key->tex);
+ found |= brw_debug_recompile_sampler_key(intel, &old_key->tex, &key->tex);
if (!found) {
perf_debug(" Something else\n");
key.clamp_vertex_color = ctx->Light._ClampVertexColor;
/* _NEW_POINT */
- if (ctx->Point.PointSprite) {
+ if (intel->gen < 6 && ctx->Point.PointSprite) {
for (i = 0; i < 8; i++) {
if (ctx->Point.CoordReplace[i])
key.point_coord_replace |= (1 << i);
brw_populate_sampler_prog_key_data(ctx, prog, &key.tex);
/* BRW_NEW_VERTICES */
- for (i = 0; i < VERT_ATTRIB_MAX; i++) {
- if (vp->program.Base.InputsRead & BITFIELD64_BIT(i) &&
- brw->vb.inputs[i].glarray->Type == GL_FIXED) {
- key.gl_fixed_input_size[i] = brw->vb.inputs[i].glarray->Size;
+ if (intel->gen < 8 && !intel->is_haswell) {
+ /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+ * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
+ */
+ for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+ if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
+ continue;
+
+ uint8_t wa_flags = 0;
+
+ switch (brw->vb.inputs[i].glarray->Type) {
+
+ case GL_FIXED:
+ wa_flags = brw->vb.inputs[i].glarray->Size;
+ break;
+
+ case GL_INT_2_10_10_10_REV:
+ wa_flags |= BRW_ATTRIB_WA_SIGN;
+         /* fallthrough */
+
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
+ wa_flags |= BRW_ATTRIB_WA_BGRA;
+
+ if (brw->vb.inputs[i].glarray->Normalized)
+ wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+ else if (!brw->vb.inputs[i].glarray->Integer)
+ wa_flags |= BRW_ATTRIB_WA_SCALE;
+
+ break;
+ }
+
+ key.gl_attrib_wa_flags[i] = wa_flags;
}
}