struct {
struct brw_stage_state base;
- struct brw_vs_prog_data *prog_data;
} vs;
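The removal above works because brw_stage_state::prog_data already holds the stage's struct brw_stage_prog_data *, and the VS-specific data is recovered by a downcast. A minimal sketch of what the helpers used throughout this patch presumably look like (the in-tree definitions live in brw_context.h and are macro-generated; the sketch assumes brw_vs_prog_data embeds brw_vue_prog_data, which embeds brw_stage_prog_data as its first member, so the cast only reinterprets the same pointer):

static inline struct brw_vue_prog_data *
brw_vue_prog_data(struct brw_stage_prog_data *prog_data)
{
   return (struct brw_vue_prog_data *) prog_data;
}

static inline struct brw_vs_prog_data *
brw_vs_prog_data(struct brw_stage_prog_data *prog_data)
{
   return (struct brw_vs_prog_data *) prog_data;
}

With these, every brw->vs.prog_data->base.base.foo access collapses to brw->vs.base.prog_data->foo, and VUE- or VS-specific fields go through the matching downcast, as in the hunks below.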
struct {
const GLuint nr_fp_regs = (brw->wm.prog_data->base.nr_params + 15) / 16;
/* BRW_NEW_VS_PROG_DATA */
- const GLuint nr_vp_regs = (brw->vs.prog_data->base.base.nr_params + 15) / 16;
+ const GLuint nr_vp_regs = (brw->vs.base.prog_data->nr_params + 15) / 16;
GLuint nr_clip_regs = 0;
GLuint total_regs;
GLuint offset = brw->curbe.vs_start * 16;
/* BRW_NEW_VS_PROG_DATA | _NEW_PROGRAM_CONSTANTS: copy uniform values */
- for (i = 0; i < brw->vs.prog_data->base.base.nr_params; i++) {
- buf[offset + i] = *brw->vs.prog_data->base.base.param[i];
+ for (i = 0; i < brw->vs.base.prog_data->nr_params; i++) {
+ buf[offset + i] = *brw->vs.base.prog_data->param[i];
}
}
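For reference, the register math above appears to pack uniforms into 16-float (64-byte) CURBE allocation units: nr_params counts 32-bit uniform slots, (nr_params + 15) / 16 rounds that up to whole units, and curbe.vs_start * 16 converts the unit offset back into a float index into buf. A small worked example with illustrative values (not taken from the patch):

/* Illustrative values only. */
const unsigned nr_params  = 20;                    /* 32-bit uniform slots      */
const unsigned nr_vp_regs = (nr_params + 15) / 16; /* -> 2 CURBE units          */
const unsigned vs_start   = 4;                     /* hypothetical unit offset  */
const unsigned first_slot = vs_start * 16;         /* -> buf[64] holds param[0] */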
const int new_basevertex =
prims[i].indexed ? prims[i].basevertex : prims[i].start;
const int new_baseinstance = prims[i].base_instance;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
if (i > 0) {
const bool uses_draw_parameters =
- brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance;
+ vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance;
if ((uses_draw_parameters && prims[i].is_indirect) ||
- (brw->vs.prog_data->uses_basevertex &&
+ (vs_prog_data->uses_basevertex &&
brw->draw.params.gl_basevertex != new_basevertex) ||
- (brw->vs.prog_data->uses_baseinstance &&
+ (vs_prog_data->uses_baseinstance &&
brw->draw.params.gl_baseinstance != new_baseinstance))
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
}
/* gl_DrawID always needs its own vertex buffer since it's not part of
* the indirect parameter buffer. If the program uses gl_DrawID, we need
* to flag BRW_NEW_VERTICES. For the first iteration, we don't have
- * valid brw->vs.prog_data, but we always flag BRW_NEW_VERTICES before
+ * valid vs_prog_data, but we always flag BRW_NEW_VERTICES before
* the loop.
*/
brw->draw.gl_drawid = prims[i].draw_id;
drm_intel_bo_unreference(brw->draw.draw_id_bo);
brw->draw.draw_id_bo = NULL;
- if (i > 0 && brw->vs.prog_data->uses_drawid)
+ if (i > 0 && vs_prog_data->uses_drawid)
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
if (brw->gen < 6)
{
struct gl_context *ctx = &brw->ctx;
/* BRW_NEW_VS_PROG_DATA */
- GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+ GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
const unsigned char *ptr = NULL;
GLuint interleaved = 0;
unsigned int min_index = brw->vb.min_index + brw->basevertex;
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
/* For non-indirect draws, upload gl_BaseVertex. */
- if ((brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance) &&
+ if ((vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) &&
brw->draw.draw_params_bo == NULL) {
intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
&brw->draw.draw_params_bo,
&brw->draw.draw_params_offset);
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
&brw->draw.draw_id_bo,
&brw->draw.draw_id_offset);
brw_emit_query_begin(brw);
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
unsigned nr_elements = brw->vb.nr_enabled;
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
- brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance)
+ if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
++nr_elements;
- if (brw->vs.prog_data->uses_drawid)
+ if (vs_prog_data->uses_drawid)
nr_elements++;
/* If the VS doesn't read any inputs (calculating vertex position from
*/
const bool uses_draw_params =
- brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance;
+ vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance;
const unsigned nr_buffers = brw->vb.nr_buffers +
- uses_draw_params + brw->vs.prog_data->uses_drawid;
+ uses_draw_params + vs_prog_data->uses_drawid;
if (nr_buffers) {
if (brw->gen >= 6) {
0); /* step rate */
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
brw->draw.draw_id_bo,
brw->draw.draw_id_offset,
((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
}
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
- brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance) {
+ if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
uint32_t dw0 = 0, dw1 = 0;
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;
- if (brw->vs.prog_data->uses_basevertex)
+ if (vs_prog_data->uses_basevertex)
comp0 = BRW_VE1_COMPONENT_STORE_SRC;
- if (brw->vs.prog_data->uses_baseinstance)
+ if (vs_prog_data->uses_baseinstance)
comp1 = BRW_VE1_COMPONENT_STORE_SRC;
- if (brw->vs.prog_data->uses_vertexid)
+ if (vs_prog_data->uses_vertexid)
comp2 = BRW_VE1_COMPONENT_STORE_VID;
- if (brw->vs.prog_data->uses_instanceid)
+ if (vs_prog_data->uses_instanceid)
comp3 = BRW_VE1_COMPONENT_STORE_IID;
dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
OUT_BATCH(dw1);
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
uint32_t dw0 = 0, dw1 = 0;
dw1 = (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
memset(&c, 0, sizeof(c));
c.key = *key;
- c.vue_map = brw->vs.prog_data->base.vue_map;
+ c.vue_map = brw_vue_prog_data(brw->vs.base.prog_data)->vue_map;
c.nr_regs = (c.vue_map.num_slots + 1)/2;
mem_ctx = ralloc_context(NULL);
memset(key, 0, sizeof(*key));
/* BRW_NEW_VS_PROG_DATA (part of VUE map) */
- key->attrs = brw->vs.prog_data->base.vue_map.slots_valid;
+ key->attrs = brw_vue_prog_data(brw->vs.base.prog_data)->vue_map.slots_valid;
/* BRW_NEW_PRIMITIVE */
key->primitive = brw->primitive;
brw->state.pipelines[BRW_COMPUTE_PIPELINE].brw = ~0ull;
/* Also, NULL out any stale program pointers. */
- brw->vs.prog_data = NULL;
brw->vs.base.prog_data = NULL;
brw->tcs.prog_data = NULL;
brw->tcs.base.prog_data = NULL;
*/
GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
bool old_separate = brw->vue_map_geom_out.separate;
+ struct brw_vue_prog_data *vue_prog_data;
if (brw->geometry_program)
- brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
+ vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
else if (brw->tess_eval_program)
- brw->vue_map_geom_out = brw->tes.prog_data->base.vue_map;
+ vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
else
- brw->vue_map_geom_out = brw->vs.prog_data->base.vue_map;
+ vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);
+
+ brw->vue_map_geom_out = vue_prog_data->vue_map;
/* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
if (old_slots != brw->vue_map_geom_out.slots_valid ||
static void recalculate_urb_fence( struct brw_context *brw )
{
GLuint csize = brw->curbe.total_size;
- GLuint vsize = brw->vs.prog_data->base.urb_entry_size;
+ GLuint vsize = brw_vue_prog_data(brw->vs.base.prog_data)->urb_entry_size;
GLuint sfsize = brw->sf.prog_data->urb_entry_size;
if (csize < limits[CS].min_entry_size)
key, sizeof(struct brw_vs_prog_key),
program, program_size,
&prog_data, sizeof(prog_data),
- &brw->vs.base.prog_offset, &brw->vs.prog_data);
+ &brw->vs.base.prog_offset, &brw->vs.base.prog_data);
ralloc_free(mem_ctx);
return true;
if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
&key, sizeof(key),
- &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
+ &brw->vs.base.prog_offset, &brw->vs.base.prog_data)) {
bool success = brw_codegen_vs_prog(brw, current[MESA_SHADER_VERTEX],
vp, &key);
(void) success;
assert(success);
}
- brw->vs.base.prog_data = &brw->vs.prog_data->base.base;
}
bool
struct brw_context *brw = brw_context(ctx);
struct brw_vs_prog_key key;
uint32_t old_prog_offset = brw->vs.base.prog_offset;
- struct brw_vs_prog_data *old_prog_data = brw->vs.prog_data;
+ struct brw_stage_prog_data *old_prog_data = brw->vs.base.prog_data;
bool success;
struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
success = brw_codegen_vs_prog(brw, shader_prog, bvp, &key);
brw->vs.base.prog_offset = old_prog_offset;
- brw->vs.prog_data = old_prog_data;
+ brw->vs.base.prog_data = old_prog_data;
return success;
}
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct brw_stage_state *stage_state = &brw->vs.base;
+ const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
+ const struct brw_vue_prog_data *vue_prog_data =
+ brw_vue_prog_data(stage_state->prog_data);
struct brw_vs_unit_state *vs;
memset(vs, 0, sizeof(*vs));
/* BRW_NEW_PROGRAM_CACHE | BRW_NEW_VS_PROG_DATA */
- vs->thread0.grf_reg_count =
- ALIGN(brw->vs.prog_data->base.total_grf, 16) / 16 - 1;
+ vs->thread0.grf_reg_count = ALIGN(vue_prog_data->total_grf, 16) / 16 - 1;
vs->thread0.kernel_start_pointer =
brw_program_reloc(brw,
stage_state->state_offset +
stage_state->prog_offset +
(vs->thread0.grf_reg_count << 1)) >> 6;
- if (brw->vs.prog_data->base.base.use_alt_mode)
+ if (prog_data->use_alt_mode)
vs->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
else
vs->thread1.floating_point_mode = BRW_FLOATING_POINT_IEEE_754;
vs->thread1.single_program_flow = (brw->gen == 5);
vs->thread1.binding_table_entry_count =
- brw->vs.prog_data->base.base.binding_table.size_bytes / 4;
+ prog_data->binding_table.size_bytes / 4;
- if (brw->vs.prog_data->base.base.total_scratch != 0) {
+ if (prog_data->total_scratch != 0) {
vs->thread2.scratch_space_base_pointer =
stage_state->scratch_bo->offset64 >> 10; /* reloc */
vs->thread2.per_thread_scratch_space =
vs->thread2.per_thread_scratch_space = 0;
}
- vs->thread3.urb_entry_read_length = brw->vs.prog_data->base.urb_read_length;
- vs->thread3.const_urb_entry_read_length
- = brw->vs.prog_data->base.base.curb_read_length;
- vs->thread3.dispatch_grf_start_reg =
- brw->vs.prog_data->base.base.dispatch_grf_start_reg;
+ vs->thread3.urb_entry_read_length = vue_prog_data->urb_read_length;
+ vs->thread3.const_urb_entry_read_length = prog_data->curb_read_length;
+ vs->thread3.dispatch_grf_start_reg = prog_data->dispatch_grf_start_reg;
vs->thread3.urb_entry_read_offset = 0;
/* BRW_NEW_CURBE_OFFSETS */
}
/* Emit scratch space relocation */
- if (brw->vs.prog_data->base.base.total_scratch != 0) {
+ if (prog_data->total_scratch != 0) {
drm_intel_bo_emit_reloc(brw->batch.bo,
stage_state->state_offset +
offsetof(struct brw_vs_unit_state, thread2),
(struct brw_vertex_program *) brw->vertex_program;
/* BRW_NEW_VS_PROG_DATA */
- const struct brw_stage_prog_data *prog_data = &brw->vs.prog_data->base.base;
+ const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_VERTEX);
/* _NEW_PROGRAM_CONSTANTS */
/* BRW_NEW_VS_PROG_DATA */
brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_VERTEX],
- &brw->vs.base, &brw->vs.prog_data->base.base);
+ &brw->vs.base, brw->vs.base.prog_data);
}
const struct brw_tracked_state brw_vs_ubo_surfaces = {
if (prog) {
/* BRW_NEW_VS_PROG_DATA */
brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_VERTEX],
- &brw->vs.base, &brw->vs.prog_data->base.base);
+ &brw->vs.base, brw->vs.base.prog_data);
}
}
if (prog) {
/* BRW_NEW_VS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_VERTEX],
- &brw->vs.base, &brw->vs.prog_data->base.base);
+ &brw->vs.base, brw->vs.base.prog_data);
}
}
}
/* BRW_NEW_VS_PROG_DATA */
- dw1 |= brw->vs.prog_data->base.cull_distance_mask;
+ dw1 |= brw_vue_prog_data(brw->vs.base.prog_data)->cull_distance_mask;
if (brw->gen >= 7)
dw1 |= GEN7_CLIP_EARLY_CULL;
upload_urb(struct brw_context *brw)
{
/* BRW_NEW_VS_PROG_DATA */
- const unsigned vs_size = MAX2(brw->vs.prog_data->base.urb_entry_size, 1);
+ const struct brw_vue_prog_data *vs_vue_prog_data =
+ brw_vue_prog_data(brw->vs.base.prog_data);
+ const unsigned vs_size = MAX2(vs_vue_prog_data->urb_entry_size, 1);
/* BRW_NEW_GEOMETRY_PROGRAM, BRW_NEW_GS_PROG_DATA */
const bool gs_present = brw->ff_gs.prog_active || brw->geometry_program;
const struct brw_vertex_program *vp =
brw_vertex_program_const(brw->vertex_program);
/* BRW_NEW_VS_PROG_DATA */
- const struct brw_stage_prog_data *prog_data = &brw->vs.prog_data->base.base;
+ const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_VERTEX);
gen6_upload_push_constants(brw, &vp->program.Base, prog_data,
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
const struct brw_stage_state *stage_state = &brw->vs.base;
+ const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
+ const struct brw_vue_prog_data *vue_prog_data =
+ brw_vue_prog_data(stage_state->prog_data);
uint32_t floating_point_mode = 0;
/* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
ADVANCE_BATCH();
}
- if (brw->vs.prog_data->base.base.use_alt_mode)
+ if (prog_data->use_alt_mode)
floating_point_mode = GEN6_VS_FLOATING_POINT_MODE_ALT;
BEGIN_BATCH(6);
OUT_BATCH(stage_state->prog_offset);
OUT_BATCH(floating_point_mode |
((ALIGN(stage_state->sampler_count, 4)/4) << GEN6_VS_SAMPLER_COUNT_SHIFT) |
- ((brw->vs.prog_data->base.base.binding_table.size_bytes / 4) <<
+ ((prog_data->binding_table.size_bytes / 4) <<
GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
- if (brw->vs.prog_data->base.base.total_scratch) {
+ if (prog_data->total_scratch) {
OUT_RELOC(stage_state->scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
ffs(stage_state->per_thread_scratch) - 11);
OUT_BATCH(0);
}
- OUT_BATCH((brw->vs.prog_data->base.base.dispatch_grf_start_reg <<
+ OUT_BATCH((prog_data->dispatch_grf_start_reg <<
GEN6_VS_DISPATCH_START_GRF_SHIFT) |
- (brw->vs.prog_data->base.urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
+ (vue_prog_data->urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
(0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT));
OUT_BATCH(((devinfo->max_vs_threads - 1) << GEN6_VS_MAX_THREADS_SHIFT) |
upload_urb(struct brw_context *brw)
{
/* BRW_NEW_VS_PROG_DATA */
- const unsigned vs_size = MAX2(brw->vs.prog_data->base.urb_entry_size, 1);
+ const struct brw_vue_prog_data *vs_vue_prog_data =
+ brw_vue_prog_data(brw->vs.base.prog_data);
+ const unsigned vs_size = MAX2(vs_vue_prog_data->urb_entry_size, 1);
/* BRW_NEW_GEOMETRY_PROGRAM, BRW_NEW_GS_PROG_DATA */
const bool gs_present = brw->geometry_program;
/* BRW_NEW_TESS_PROGRAMS */
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
const struct brw_stage_state *stage_state = &brw->vs.base;
+ const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
+ const struct brw_vue_prog_data *vue_prog_data =
+ brw_vue_prog_data(stage_state->prog_data);
uint32_t floating_point_mode = 0;
const int max_threads_shift = brw->is_haswell ?
HSW_VS_MAX_THREADS_SHIFT : GEN6_VS_MAX_THREADS_SHIFT;
- const struct brw_vue_prog_data *prog_data = &brw->vs.prog_data->base;
if (!brw->is_haswell && !brw->is_baytrail)
gen7_emit_vs_workaround_flush(brw);
- if (prog_data->base.use_alt_mode)
+ if (prog_data->use_alt_mode)
floating_point_mode = GEN6_VS_FLOATING_POINT_MODE_ALT;
BEGIN_BATCH(6);
OUT_BATCH(floating_point_mode |
((ALIGN(stage_state->sampler_count, 4)/4) <<
GEN6_VS_SAMPLER_COUNT_SHIFT) |
- ((prog_data->base.binding_table.size_bytes / 4) <<
+ ((prog_data->binding_table.size_bytes / 4) <<
GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
- if (prog_data->base.total_scratch) {
+ if (prog_data->total_scratch) {
OUT_RELOC(stage_state->scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
ffs(stage_state->per_thread_scratch) - 11);
OUT_BATCH(0);
}
- OUT_BATCH((prog_data->base.dispatch_grf_start_reg <<
+ OUT_BATCH((prog_data->dispatch_grf_start_reg <<
GEN6_VS_DISPATCH_START_GRF_SHIFT) |
- (prog_data->urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
+ (vue_prog_data->urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
(0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT));
OUT_BATCH(((devinfo->max_vs_threads - 1) << max_threads_shift) |
uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL);
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid) {
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
+ if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
unsigned vue = brw->vb.nr_enabled;
/* The element for the edge flags must always be last, so we have to
"need to reorder the vertex attrbutes.");
unsigned dw1 = 0;
- if (brw->vs.prog_data->uses_vertexid) {
+ if (vs_prog_data->uses_vertexid) {
dw1 |= GEN8_SGVS_ENABLE_VERTEX_ID |
(2 << GEN8_SGVS_VERTEX_ID_COMPONENT_SHIFT) | /* .z channel */
(vue << GEN8_SGVS_VERTEX_ID_ELEMENT_OFFSET_SHIFT);
}
- if (brw->vs.prog_data->uses_instanceid) {
+ if (vs_prog_data->uses_instanceid) {
dw1 |= GEN8_SGVS_ENABLE_INSTANCE_ID |
(3 << GEN8_SGVS_INSTANCE_ID_COMPONENT_SHIFT) | /* .w channel */
(vue << GEN8_SGVS_INSTANCE_ID_ELEMENT_OFFSET_SHIFT);
/* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
const bool uses_draw_params =
- brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance;
+ vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance;
const unsigned nr_buffers = brw->vb.nr_buffers +
- uses_draw_params + brw->vs.prog_data->uses_drawid;
+ uses_draw_params + vs_prog_data->uses_drawid;
if (nr_buffers) {
assert(nr_buffers <= 33);
0 /* unused */);
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
brw->draw.draw_id_bo,
brw->draw.draw_id_offset,
* can't be inserted past that so we need a dummy element to ensure that
* the edge flag is the last one.
*/
- const bool needs_sgvs_element = (brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance ||
- ((brw->vs.prog_data->uses_instanceid ||
- brw->vs.prog_data->uses_vertexid) &&
+ const bool needs_sgvs_element = (vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance ||
+ ((vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_vertexid) &&
uses_edge_flag));
const unsigned nr_elements =
- brw->vb.nr_enabled + needs_sgvs_element + brw->vs.prog_data->uses_drawid;
+ brw->vb.nr_enabled + needs_sgvs_element + vs_prog_data->uses_drawid;
/* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
* presumably for VertexID/InstanceID.
}
if (needs_sgvs_element) {
- if (brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance) {
+ if (vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance) {
OUT_BATCH(GEN6_VE0_VALID |
brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT);
}
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
OUT_BATCH(GEN6_VE0_VALID |
((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
(BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT));
ADVANCE_BATCH();
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
uint32_t floating_point_mode = 0;
/* BRW_NEW_VS_PROG_DATA */
- const struct brw_vue_prog_data *prog_data = &brw->vs.prog_data->base;
+ const struct brw_stage_prog_data *prog_data = stage_state->prog_data;
+ const struct brw_vue_prog_data *vue_prog_data =
+ brw_vue_prog_data(stage_state->prog_data);
- assert(prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
- prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);
+ assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);
- if (prog_data->base.use_alt_mode)
+ if (prog_data->use_alt_mode)
floating_point_mode = GEN6_VS_FLOATING_POINT_MODE_ALT;
BEGIN_BATCH(9);
OUT_BATCH(floating_point_mode |
((ALIGN(stage_state->sampler_count, 4) / 4) <<
GEN6_VS_SAMPLER_COUNT_SHIFT) |
- ((prog_data->base.binding_table.size_bytes / 4) <<
+ ((prog_data->binding_table.size_bytes / 4) <<
GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
- if (prog_data->base.total_scratch) {
+ if (prog_data->total_scratch) {
OUT_RELOC64(stage_state->scratch_bo,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
ffs(stage_state->per_thread_scratch) - 11);
OUT_BATCH(0);
}
- OUT_BATCH((prog_data->base.dispatch_grf_start_reg <<
+ OUT_BATCH((prog_data->dispatch_grf_start_reg <<
GEN6_VS_DISPATCH_START_GRF_SHIFT) |
- (prog_data->urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
+ (vue_prog_data->urb_read_length <<
+ GEN6_VS_URB_READ_LENGTH_SHIFT) |
(0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT));
- uint32_t simd8_enable = prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ?
+ uint32_t simd8_enable =
+ vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ?
GEN8_VS_SIMD8_ENABLE : 0;
OUT_BATCH(((devinfo->max_vs_threads - 1) << HSW_VS_MAX_THREADS_SHIFT) |
GEN6_VS_STATISTICS_ENABLE |
GEN6_VS_ENABLE);
/* _NEW_TRANSFORM */
- OUT_BATCH(prog_data->cull_distance_mask |
+ OUT_BATCH(vue_prog_data->cull_distance_mask |
(ctx->Transform.ClipPlanesEnabled <<
GEN8_VS_USER_CLIP_DISTANCE_SHIFT));
ADVANCE_BATCH();