*/
#define BRW_NEW_BATCH (1 << BRW_STATE_BATCH)
/** \see brw.state.depth_region */
-#define BRW_NEW_NR_WM_SURFACES (1 << BRW_STATE_NR_WM_SURFACES)
-#define BRW_NEW_NR_VS_SURFACES (1 << BRW_STATE_NR_VS_SURFACES)
#define BRW_NEW_INDEX_BUFFER (1 << BRW_STATE_INDEX_BUFFER)
#define BRW_NEW_VS_CONSTBUF (1 << BRW_STATE_VS_CONSTBUF)
#define BRW_NEW_WM_CONSTBUF (1 << BRW_STATE_WM_CONSTBUF)
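Each BRW_NEW_* flag is one bit in the driver's dirty bitmask, keyed by the matching BRW_STATE_* enum index, so deleting a flag also frees its enum slot. A minimal sketch of how these bits gate state emission; needs_emit() is an illustrative helper, not a function in the driver:

    #include <stdint.h>
    #include <stdbool.h>

    /* Mirrors the defines above: each flag is (1 << its enum index). */
    enum brw_state_index { BRW_STATE_BATCH, BRW_STATE_INDEX_BUFFER };
    #define BRW_NEW_BATCH        (1 << BRW_STATE_BATCH)
    #define BRW_NEW_INDEX_BUFFER (1 << BRW_STATE_INDEX_BUFFER)

    /* A state atom is re-run only when a flag it subscribes to is set. */
    static bool
    needs_emit(uint32_t dirty, uint32_t subscribed)
    {
       return (dirty & subscribed) != 0;
    }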
/** Binding table of pointers to surf_bo entries */
uint32_t bind_bo_offset;
uint32_t surf_offset[BRW_VS_MAX_SURF];
- GLuint nr_surfaces;
uint32_t push_const_offset; /* Offset in the batchbuffer */
int push_const_size; /* in 256-bit register increments */
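A second hunk drops the matching counter from the WM per-stage block: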
uint32_t sdc_offset[BRW_MAX_TEX_UNIT];
GLuint render_surf;
- GLuint nr_surfaces;
drm_intel_bo *scratch_bo;
DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
DEFINE_BIT(BRW_NEW_VERTICES),
DEFINE_BIT(BRW_NEW_BATCH),
- DEFINE_BIT(BRW_NEW_NR_WM_SURFACES),
- DEFINE_BIT(BRW_NEW_NR_VS_SURFACES),
DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
DEFINE_BIT(BRW_NEW_WM_CONSTBUF),
DEFINE_BIT(BRW_NEW_VS_BINDING_TABLE),
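DEFINE_BIT pairs each flag with its stringified name so the state-upload debug code can report which dirty bits fired; with the NR_*_SURFACES flags gone, their table entries go too. A sketch of the usual shape of that macro and table (field names are an approximation, not quoted from brw_state_upload.c):

    #include <stdint.h>

    struct dirty_bit_map {
       uint32_t bit;      /* one BRW_NEW_* flag */
       const char *name;  /* its name, for debug printing */
       uint32_t count;    /* how often it was seen dirty */
    };

    #define DEFINE_BIT(name) { name, #name, 0 }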
*/
vs->thread1.single_program_flow = (intel->gen == 5);
- /* BRW_NEW_NR_VS_SURFACES */
- if (intel->gen == 5)
- vs->thread1.binding_table_entry_count = 0; /* hardware requirement */
- else
- vs->thread1.binding_table_entry_count = brw->vs.nr_surfaces;
+ vs->thread1.binding_table_entry_count = 0;
if (brw->vs.prog_data->total_scratch != 0) {
vs->thread2.scratch_space_base_pointer =
.brw = (BRW_NEW_BATCH |
BRW_NEW_PROGRAM_CACHE |
BRW_NEW_CURBE_OFFSETS |
- BRW_NEW_NR_VS_SURFACES |
BRW_NEW_URB_FENCE |
BRW_NEW_VERTEX_PROGRAM),
.cache = CACHE_NEW_VS_PROG
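Descriptor lists like the .brw/.cache block above live in a tracked-state struct; on each draw, the upload loop masks them against the accumulated dirty flags and runs the atom only on a nonzero result, which is why a stale flag such as BRW_NEW_NR_VS_SURFACES can simply be deleted from the list. A rough sketch of the shape, with member names written from the driver's convention rather than quoted:

    #include <GL/gl.h>

    struct brw_context;  /* opaque for this sketch */

    struct brw_state_flags {
       GLuint mesa;   /* _NEW_* flags from core Mesa */
       GLuint brw;    /* BRW_NEW_* driver flags */
       GLuint cache;  /* CACHE_NEW_* program-cache flags */
    };

    struct brw_tracked_state {
       struct brw_state_flags dirty;
       void (*emit)(struct brw_context *brw);
    };

    /* Nonzero if the atom subscribes to any currently dirty flag. */
    static GLuint
    check_state(const struct brw_state_flags *a,
                const struct brw_state_flags *b)
    {
       return (a->mesa & b->mesa) |
              (a->brw & b->brw) |
              (a->cache & b->cache);
    }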
/**
* Vertex shader surfaces (constant buffer).
- *
- * This consumes the state updates for the constant buffer needing
- * to be updated, and produces BRW_NEW_NR_VS_SURFACES for the VS unit.
*/
static void
brw_upload_vs_surfaces(struct brw_context *brw)
brw->vs.bind_bo_offset = 0;
}
}
-
- if (brw->vs.nr_surfaces != nr_surfaces) {
- brw->state.dirty.brw |= BRW_NEW_NR_VS_SURFACES;
- brw->vs.nr_surfaces = nr_surfaces;
- }
}
const struct brw_tracked_state brw_vs_surfaces = {
wm->thread1.depth_coef_urb_read_offset = 1;
wm->thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
- if (intel->gen == 5)
- wm->thread1.binding_table_entry_count = 0; /* hardware requirement */
- else {
- /* BRW_NEW_NR_SURFACES */
- wm->thread1.binding_table_entry_count = brw->wm.nr_surfaces;
- }
+ wm->thread1.binding_table_entry_count = 0;
if (brw->wm.prog_data->total_scratch != 0) {
wm->thread2.scratch_space_base_pointer =
.brw = (BRW_NEW_BATCH |
BRW_NEW_PROGRAM_CACHE |
BRW_NEW_FRAGMENT_PROGRAM |
- BRW_NEW_CURBE_OFFSETS |
- BRW_NEW_NR_WM_SURFACES),
+ BRW_NEW_CURBE_OFFSETS),
.cache = (CACHE_NEW_WM_PROG |
CACHE_NEW_SAMPLER)
}
}
- if (brw->wm.nr_surfaces != nr_surfaces) {
- brw->wm.nr_surfaces = nr_surfaces;
- brw->state.dirty.brw |= BRW_NEW_NR_WM_SURFACES;
- }
-
brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}
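Dropping the counts is safe because binding_table_entry_count (and the equivalent GEN6_VS/GEN7_PS packet fields below) serves only as a prefetch hint: the hardware still fetches binding-table entries on demand when the count is zero, and Ironlake already required a zero count outright, per the deleted "hardware requirement" comments. The surface-state step therefore only has to flag that the table's contents changed. A much-abridged sketch of the resulting flow, with a pared-down context struct so the fragment stands alone:

    #include <stdint.h>

    #define BRW_NEW_WM_SURFACES (1 << 0)   /* placeholder bit value */

    struct brw_context_sketch {
       struct { struct { uint32_t brw; } dirty; } state;
    };

    static void
    upload_wm_surfaces_sketch(struct brw_context_sketch *brw)
    {
       /* ... write the surface states and the binding table ... */
       brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
       /* No BRW_NEW_NR_WM_SURFACES bit: nothing consumes the count now. */
    }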
BEGIN_BATCH(6);
OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
OUT_BATCH(brw->vs.prog_offset);
- OUT_BATCH((0 << GEN6_VS_SAMPLER_COUNT_SHIFT) |
- floating_point_mode |
- (brw->vs.nr_surfaces << GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
+ OUT_BATCH(floating_point_mode | (0 << GEN6_VS_SAMPLER_COUNT_SHIFT));
if (brw->vs.prog_data->total_scratch) {
OUT_RELOC(brw->vs.scratch_bo,
const struct brw_tracked_state gen6_vs_state = {
.dirty = {
.mesa = _NEW_TRANSFORM | _NEW_PROGRAM_CONSTANTS,
- .brw = (BRW_NEW_NR_VS_SURFACES |
- BRW_NEW_URB_FENCE |
+ .brw = (BRW_NEW_URB_FENCE |
BRW_NEW_CONTEXT |
BRW_NEW_VERTEX_PROGRAM |
BRW_NEW_BATCH),
if (ctx->Shader.CurrentFragmentProgram == NULL)
dw2 |= GEN6_WM_FLOATING_POINT_MODE_ALT;
- /* BRW_NEW_NR_WM_SURFACES */
- dw2 |= brw->wm.nr_surfaces << GEN6_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT;
-
/* CACHE_NEW_SAMPLER */
dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN6_WM_SAMPLER_COUNT_SHIFT;
dw4 |= (brw->wm.prog_data->first_curbe_grf <<
_NEW_PROGRAM_CONSTANTS |
_NEW_POLYGON),
.brw = (BRW_NEW_FRAGMENT_PROGRAM |
- BRW_NEW_NR_WM_SURFACES |
BRW_NEW_URB_FENCE |
BRW_NEW_BATCH),
.cache = (CACHE_NEW_SAMPLER |
BEGIN_BATCH(6);
OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
OUT_BATCH(brw->vs.prog_offset);
- OUT_BATCH((0 << GEN6_VS_SAMPLER_COUNT_SHIFT) |
- floating_point_mode |
- (brw->vs.nr_surfaces << GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
+ OUT_BATCH((0 << GEN6_VS_SAMPLER_COUNT_SHIFT) | floating_point_mode);
if (brw->vs.prog_data->total_scratch) {
OUT_RELOC(brw->vs.scratch_bo,
.dirty = {
.mesa = _NEW_TRANSFORM | _NEW_PROGRAM_CONSTANTS,
.brw = (BRW_NEW_CURBE_OFFSETS |
- BRW_NEW_NR_VS_SURFACES |
BRW_NEW_URB_FENCE |
BRW_NEW_CONTEXT |
BRW_NEW_VERTEX_PROGRAM |
dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;
- /* BRW_NEW_NR_WM_SURFACES */
- dw2 |= brw->wm.nr_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT;
-
/* Use ALT floating point mode for ARB fragment programs, because they
* require 0^0 == 1.
*/
.mesa = _NEW_PROGRAM_CONSTANTS,
.brw = (BRW_NEW_CURBE_OFFSETS |
BRW_NEW_FRAGMENT_PROGRAM |
- BRW_NEW_NR_WM_SURFACES |
BRW_NEW_PS_BINDING_TABLE |
BRW_NEW_URB_FENCE |
BRW_NEW_BATCH),