sizeof(*vs), 32, &stage_state->state_offset);
memset(vs, 0, sizeof(*vs));
/* BRW_NEW_PROGRAM_CACHE | BRW_NEW_VS_PROG_DATA */
vs->thread0.grf_reg_count =
ALIGN(brw->vs.prog_data->base.total_grf, 16) / 16 - 1;
vs->thread0.kernel_start_pointer =
stage_state->prog_offset +
(vs->thread0.grf_reg_count << 1)) >> 6;
if (brw->vs.prog_data->base.base.use_alt_mode)
vs->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
else
vs->thread1.floating_point_mode = BRW_FLOATING_POINT_IEEE_754;
vs->thread1.binding_table_entry_count =
brw->vs.prog_data->base.base.binding_table.size_bytes / 4;
if (brw->vs.prog_data->base.base.total_scratch != 0) {
vs->thread2.scratch_space_base_pointer =
stage_state->scratch_bo->offset64 >> 10; /* reloc */
vs->thread2.per_thread_scratch_space =
ffs(brw->vs.prog_data->base.base.total_scratch) - 11;
} else {
vs->thread2.scratch_space_base_pointer = 0;
vs->thread2.per_thread_scratch_space = 0;
vs->thread3.urb_entry_read_length = brw->vs.prog_data->base.urb_read_length;
vs->thread3.const_urb_entry_read_length
= brw->vs.prog_data->base.base.curb_read_length;
vs->thread3.dispatch_grf_start_reg =
brw->vs.prog_data->base.base.dispatch_grf_start_reg;
vs->thread3.urb_entry_read_offset = 0;
/* BRW_NEW_CURBE_OFFSETS */
vs->thread3.const_urb_entry_read_offset = brw->curbe.vs_start * 2;
/* BRW_NEW_URB_FENCE */
if (brw->gen == 5)
vs->vs5.sampler_count = 0; /* hardware requirement */
else {
vs->vs5.sampler_count = (stage_state->sampler_count + 3) / 4;
}
/* Set the sampler state pointer, and its reloc
*/
if (stage_state->sampler_count) {
/* BRW_NEW_SAMPLER_STATE_TABLE - reloc */
vs->vs5.sampler_state_pointer =
(brw->batch.bo->offset64 + stage_state->sampler_offset) >> 5;
drm_intel_bo_emit_reloc(brw->batch.bo,
}
/* Emit scratch space relocation */
if (brw->vs.prog_data->base.base.total_scratch != 0) {
drm_intel_bo_emit_reloc(brw->batch.bo,
stage_state->state_offset +
offsetof(struct brw_vs_unit_state, thread2),
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
brw->state.dirty.brw |= BRW_NEW_GEN4_UNIT_STATE;
}
const struct brw_tracked_state brw_vs_unit = {
.dirty = {
- .mesa = _NEW_TRANSFORM,
- .brw = (BRW_NEW_BATCH |
- BRW_NEW_PROGRAM_CACHE |
- BRW_NEW_CURBE_OFFSETS |
- BRW_NEW_URB_FENCE |
- BRW_NEW_VERTEX_PROGRAM),
- .cache = CACHE_NEW_VS_PROG | CACHE_NEW_SAMPLER
+ .mesa = 0,
+ .brw = BRW_NEW_BATCH |
+ BRW_NEW_CURBE_OFFSETS |
+ BRW_NEW_PROGRAM_CACHE |
+ BRW_NEW_SAMPLER_STATE_TABLE |
+ BRW_NEW_URB_FENCE |
+ BRW_NEW_VS_PROG_DATA,
},
.emit = brw_upload_vs_unit,
};