#include "glsl/ralloc.h"
-static void compile_gs_prog( struct brw_context *brw,
- struct brw_gs_prog_key *key )
+static void compile_ff_gs_prog(struct brw_context *brw,
+ struct brw_ff_gs_prog_key *key)
{
- struct brw_gs_compile c;
+ struct brw_ff_gs_compile c;
const GLuint *program;
void *mem_ctx;
GLuint program_size;
switch (key->primitive) {
case _3DPRIM_QUADLIST:
- brw_gs_quads( &c, key );
+ brw_ff_gs_quads( &c, key );
break;
case _3DPRIM_QUADSTRIP:
- brw_gs_quad_strip( &c, key );
+ brw_ff_gs_quad_strip( &c, key );
break;
case _3DPRIM_LINELOOP:
- brw_gs_lines( &c );
+ brw_ff_gs_lines( &c );
break;
default:
ralloc_free(mem_ctx);
printf("\n");
}
- brw_upload_cache(&brw->cache, BRW_GS_PROG,
+ brw_upload_cache(&brw->cache, BRW_FF_GS_PROG,
&c.key, sizeof(c.key),
program, program_size,
&c.prog_data, sizeof(c.prog_data),
- &brw->gs.prog_offset, &brw->gs.prog_data);
+ &brw->ff_gs.prog_offset, &brw->ff_gs.prog_data);
ralloc_free(mem_ctx);
}
-static void populate_key( struct brw_context *brw,
- struct brw_gs_prog_key *key )
+static void populate_key(struct brw_context *brw,
+ struct brw_ff_gs_prog_key *key)
{
static const unsigned swizzle_for_offset[4] = {
BRW_SWIZZLE4(0, 1, 2, 3),
/* Calculate interpolants for triangle and line rasterization.
*/
static void
-brw_upload_gs_prog(struct brw_context *brw)
+brw_upload_ff_gs_prog(struct brw_context *brw)
{
- struct brw_gs_prog_key key;
+ struct brw_ff_gs_prog_key key;
/* Populate the key:
*/
populate_key(brw, &key);
- if (brw->gs.prog_active != key.need_gs_prog) {
- brw->state.dirty.cache |= CACHE_NEW_GS_PROG;
- brw->gs.prog_active = key.need_gs_prog;
+ if (brw->ff_gs.prog_active != key.need_gs_prog) {
+ brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
+ brw->ff_gs.prog_active = key.need_gs_prog;
}
- if (brw->gs.prog_active) {
- if (!brw_search_cache(&brw->cache, BRW_GS_PROG,
+ if (brw->ff_gs.prog_active) {
+ if (!brw_search_cache(&brw->cache, BRW_FF_GS_PROG,
&key, sizeof(key),
- &brw->gs.prog_offset, &brw->gs.prog_data)) {
- compile_gs_prog( brw, &key );
+ &brw->ff_gs.prog_offset, &brw->ff_gs.prog_data)) {
+ compile_ff_gs_prog( brw, &key );
}
}
}
-const struct brw_tracked_state brw_gs_prog = {
+const struct brw_tracked_state brw_ff_gs_prog = {
.dirty = {
.mesa = (_NEW_LIGHT),
.brw = (BRW_NEW_PRIMITIVE |
BRW_NEW_TRANSFORM_FEEDBACK),
.cache = CACHE_NEW_VS_PROG
},
- .emit = brw_upload_gs_prog
+ .emit = brw_upload_ff_gs_prog
};
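For context, the renamed atom only takes effect where it is referenced from the driver's per-generation atom tables; a minimal sketch of that hookup follows (the table name gen4_atoms and the surrounding entries are assumptions for illustration, not part of this patch):

/* Hypothetical excerpt of a state-atom table in brw_state_upload.c: the
 * driver walks this list on each draw, comparing every atom's .dirty bits
 * against the current dirty state and calling .emit on a match.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   /* ... */
   &brw_ff_gs_prog,    /* previously &brw_gs_prog */
   /* ... */
};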
*
* - The thread will need to use the destination_indices register.
*/
-static void brw_gs_alloc_regs( struct brw_gs_compile *c,
- GLuint nr_verts,
- bool sol_program )
+static void brw_ff_gs_alloc_regs(struct brw_ff_gs_compile *c,
+ GLuint nr_verts,
+ bool sol_program)
{
   GLuint i = 0, j;
 * This function sets up the above data by copying the contents of
* R0 to the header register.
*/
-static void brw_gs_initialize_header(struct brw_gs_compile *c)
+static void brw_ff_gs_initialize_header(struct brw_ff_gs_compile *c)
{
struct brw_compile *p = &c->func;
brw_MOV(p, c->reg.header, c->reg.R0);
* PrimEnd, Increment CL_INVOCATIONS, and SONumPrimsWritten, many of which we
* need to be able to update on a per-vertex basis.
*/
-static void brw_gs_overwrite_header_dw2(struct brw_gs_compile *c,
- unsigned dw2)
+static void brw_ff_gs_overwrite_header_dw2(struct brw_ff_gs_compile *c,
+ unsigned dw2)
{
struct brw_compile *p = &c->func;
brw_MOV(p, get_element_ud(c->reg.header, 2), brw_imm_ud(dw2));
* DWORD 2. So this function extracts the primitive type field, bitshifts it
* appropriately, and stores it in c->reg.header.
*/
-static void brw_gs_overwrite_header_dw2_from_r0(struct brw_gs_compile *c)
+static void brw_ff_gs_overwrite_header_dw2_from_r0(struct brw_ff_gs_compile *c)
{
struct brw_compile *p = &c->func;
brw_AND(p, get_element_ud(c->reg.header, 2), get_element_ud(c->reg.R0, 2),
* This is used to set/unset the "PrimStart" and "PrimEnd" flags appropriately
* for each vertex.
*/
-static void brw_gs_offset_header_dw2(struct brw_gs_compile *c, int offset)
+static void brw_ff_gs_offset_header_dw2(struct brw_ff_gs_compile *c,
+ int offset)
{
struct brw_compile *p = &c->func;
brw_ADD(p, get_element_d(c->reg.header, 2), get_element_d(c->reg.header, 2),
* will be stored in DWORD 0 of c->reg.header for use in the next URB_WRITE
* message.
*/
-static void brw_gs_emit_vue(struct brw_gs_compile *c,
- struct brw_reg vert,
- bool last)
+static void brw_ff_gs_emit_vue(struct brw_ff_gs_compile *c,
+ struct brw_reg vert,
+ bool last)
{
struct brw_compile *p = &c->func;
bool allocate = !last;
 * the allocated URB entry (which will be needed by the URB_WRITE message that
* follows).
*/
-static void brw_gs_ff_sync(struct brw_gs_compile *c, int num_prim)
+static void brw_ff_gs_ff_sync(struct brw_ff_gs_compile *c, int num_prim)
{
struct brw_compile *p = &c->func;
}
-void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
+void
+brw_ff_gs_quads(struct brw_ff_gs_compile *c, struct brw_ff_gs_prog_key *key)
{
struct brw_context *brw = c->func.brw;
- brw_gs_alloc_regs(c, 4, false);
- brw_gs_initialize_header(c);
+ brw_ff_gs_alloc_regs(c, 4, false);
+ brw_ff_gs_initialize_header(c);
/* Use polygons for correct edgeflag behaviour. Note that vertex 3
* is the PV for quads, but vertex 0 for polygons:
*/
if (brw->gen == 5)
- brw_gs_ff_sync(c, 1);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_ff_sync(c, 1);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_START));
if (key->pv_first) {
- brw_gs_emit_vue(c, c->reg.vertex[0], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, _3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT);
- brw_gs_emit_vue(c, c->reg.vertex[1], 0);
- brw_gs_emit_vue(c, c->reg.vertex[2], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], 0);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[2], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_END));
- brw_gs_emit_vue(c, c->reg.vertex[3], 1);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[3], 1);
}
else {
- brw_gs_emit_vue(c, c->reg.vertex[3], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[3], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, _3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT);
- brw_gs_emit_vue(c, c->reg.vertex[0], 0);
- brw_gs_emit_vue(c, c->reg.vertex[1], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], 0);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_END));
- brw_gs_emit_vue(c, c->reg.vertex[2], 1);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[2], 1);
}
}
-void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
+void
+brw_ff_gs_quad_strip(struct brw_ff_gs_compile *c,
+ struct brw_ff_gs_prog_key *key)
{
struct brw_context *brw = c->func.brw;
- brw_gs_alloc_regs(c, 4, false);
- brw_gs_initialize_header(c);
+ brw_ff_gs_alloc_regs(c, 4, false);
+ brw_ff_gs_initialize_header(c);
if (brw->gen == 5)
- brw_gs_ff_sync(c, 1);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_ff_sync(c, 1);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_START));
if (key->pv_first) {
- brw_gs_emit_vue(c, c->reg.vertex[0], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, _3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT);
- brw_gs_emit_vue(c, c->reg.vertex[1], 0);
- brw_gs_emit_vue(c, c->reg.vertex[2], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], 0);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[2], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_END));
- brw_gs_emit_vue(c, c->reg.vertex[3], 1);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[3], 1);
}
else {
- brw_gs_emit_vue(c, c->reg.vertex[2], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[2], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, _3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT);
- brw_gs_emit_vue(c, c->reg.vertex[3], 0);
- brw_gs_emit_vue(c, c->reg.vertex[0], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[3], 0);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_POLYGON << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_END));
- brw_gs_emit_vue(c, c->reg.vertex[1], 1);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], 1);
}
}
-void brw_gs_lines( struct brw_gs_compile *c )
+void brw_ff_gs_lines(struct brw_ff_gs_compile *c)
{
struct brw_context *brw = c->func.brw;
- brw_gs_alloc_regs(c, 2, false);
- brw_gs_initialize_header(c);
+ brw_ff_gs_alloc_regs(c, 2, false);
+ brw_ff_gs_initialize_header(c);
if (brw->gen == 5)
- brw_gs_ff_sync(c, 1);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_ff_sync(c, 1);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_START));
- brw_gs_emit_vue(c, c->reg.vertex[0], 0);
- brw_gs_overwrite_header_dw2(
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], 0);
+ brw_ff_gs_overwrite_header_dw2(
c, ((_3DPRIM_LINESTRIP << URB_WRITE_PRIM_TYPE_SHIFT)
| URB_WRITE_PRIM_END));
- brw_gs_emit_vue(c, c->reg.vertex[1], 1);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], 1);
}
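The three emitters above share one header pattern: the first vertex of each output primitive carries URB_WRITE_PRIM_START, interior vertices carry only the primitive type, and the final vertex carries URB_WRITE_PRIM_END and is flagged as last so its URB handle is released. A minimal sketch of that pattern, using a hypothetical emit_prim() helper in place of the unrolled brw_ff_gs_overwrite_header_dw2()/brw_ff_gs_emit_vue() sequences:

/* Hypothetical helper, for illustration only; not part of this patch. */
static void
emit_prim(struct brw_ff_gs_compile *c, unsigned prim_type,
          const unsigned *order, unsigned n)
{
   for (unsigned i = 0; i < n; i++) {
      unsigned dw2 = prim_type << URB_WRITE_PRIM_TYPE_SHIFT;
      if (i == 0)
         dw2 |= URB_WRITE_PRIM_START;  /* first vertex opens the primitive */
      if (i == n - 1)
         dw2 |= URB_WRITE_PRIM_END;    /* last vertex closes it */
      brw_ff_gs_overwrite_header_dw2(c, dw2);
      /* "last" also tells emit_vue not to allocate another URB handle */
      brw_ff_gs_emit_vue(c, c->reg.vertex[order[i]], i == n - 1);
   }
}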
/**
 * Generate the geometry shader program used on Gen6 to perform stream output
 * (transform feedback).
*/
void
-gen6_sol_program(struct brw_gs_compile *c, struct brw_gs_prog_key *key,
+gen6_sol_program(struct brw_ff_gs_compile *c, struct brw_ff_gs_prog_key *key,
unsigned num_verts, bool check_edge_flags)
{
struct brw_compile *p = &c->func;
c->prog_data.svbi_postincrement_value = num_verts;
- brw_gs_alloc_regs(c, num_verts, true);
- brw_gs_initialize_header(c);
+ brw_ff_gs_alloc_regs(c, num_verts, true);
+ brw_ff_gs_initialize_header(c);
if (key->num_transform_feedback_bindings > 0) {
unsigned vertex, binding;
* the register that we overwrote while streaming out transform feedback
* data.
*/
- brw_gs_initialize_header(c);
+ brw_ff_gs_initialize_header(c);
/* Finally, wait for the write commit to occur so that we can proceed to
* other things safely.
brw_MOV(p, c->reg.temp, c->reg.temp);
}
- brw_gs_ff_sync(c, 1);
+ brw_ff_gs_ff_sync(c, 1);
- brw_gs_overwrite_header_dw2_from_r0(c);
+ brw_ff_gs_overwrite_header_dw2_from_r0(c);
switch (num_verts) {
case 1:
- brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
- brw_gs_emit_vue(c, c->reg.vertex[0], true);
+ brw_ff_gs_offset_header_dw2(c,
+ URB_WRITE_PRIM_START | URB_WRITE_PRIM_END);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], true);
break;
case 2:
- brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
- brw_gs_emit_vue(c, c->reg.vertex[0], false);
- brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_END - URB_WRITE_PRIM_START);
- brw_gs_emit_vue(c, c->reg.vertex[1], true);
+ brw_ff_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], false);
+ brw_ff_gs_offset_header_dw2(c,
+ URB_WRITE_PRIM_END - URB_WRITE_PRIM_START);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], true);
break;
case 3:
if (check_edge_flags) {
brw_imm_ud(BRW_GS_EDGE_INDICATOR_0));
brw_IF(p, BRW_EXECUTE_1);
}
- brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
- brw_gs_emit_vue(c, c->reg.vertex[0], false);
- brw_gs_offset_header_dw2(c, -URB_WRITE_PRIM_START);
- brw_gs_emit_vue(c, c->reg.vertex[1], false);
+ brw_ff_gs_offset_header_dw2(c, URB_WRITE_PRIM_START);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[0], false);
+ brw_ff_gs_offset_header_dw2(c, -URB_WRITE_PRIM_START);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[1], false);
if (check_edge_flags) {
brw_ENDIF(p);
/* Only emit vertex 2 in PRIM_END mode if this is the last triangle
brw_imm_ud(BRW_GS_EDGE_INDICATOR_1));
brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
}
- brw_gs_offset_header_dw2(c, URB_WRITE_PRIM_END);
+ brw_ff_gs_offset_header_dw2(c, URB_WRITE_PRIM_END);
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
- brw_gs_emit_vue(c, c->reg.vertex[2], true);
+ brw_ff_gs_emit_vue(c, c->reg.vertex[2], true);
break;
}
}
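Unlike the quad and line emitters, gen6_sol_program() adjusts the header flags with brw_ff_gs_offset_header_dw2(), adding and subtracting flag bits relative to the primitive-type value that brw_ff_gs_overwrite_header_dw2_from_r0() extracted from R0, rather than rewriting all of DWORD 2. A short worked example of that arithmetic for the two-vertex case, assuming the usual single-bit flag definitions from brw_defines.h:

/* Illustration of the offset arithmetic above (not part of this patch). */
unsigned dw2 = base_from_r0;                        /* PrimType etc., no flags */
dw2 += URB_WRITE_PRIM_START;                        /* before vertex 0 */
dw2 += URB_WRITE_PRIM_END - URB_WRITE_PRIM_START;   /* before vertex 1 */
/* Net result: dw2 == base_from_r0 | URB_WRITE_PRIM_END; PRIM_START has been
 * cancelled out, so only the final vertex marks the end of the primitive.
 */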