prog_data->binding_table.size_bytes);
}
- SET_DIRTY_BIT(brw, brw_new_binding_table);
+ brw->state.dirty.brw |= brw_new_binding_table;
if (brw->gen >= 7) {
BEGIN_BATCH(2);
/* We've smashed all state compared to what the normal 3D pipeline
* rendering tracks for GL.
*/
- SET_DIRTY64_ALL(brw);
- SET_DIRTY_ALL(cache);
+ brw->state.dirty.brw = ~0;
+ brw->state.dirty.cache = ~0;
brw->no_depth_or_stencil = false;
brw->ib.type = -1;
}
}
- SET_DIRTY_BIT(cache, CACHE_NEW_CC_VP);
+ brw->state.dirty.cache |= CACHE_NEW_CC_VP;
}
const struct brw_tracked_state brw_cc_vp = {
cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset64 +
brw->cc.vp_offset) >> 5; /* reloc */
- SET_DIRTY_BIT(cache, CACHE_NEW_CC_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
/* Emit CC viewport relocation */
drm_intel_bo_emit_reloc(brw->batch.bo,
clip->viewport_ymin = -1;
clip->viewport_ymax = 1;
- SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_CLIP_UNIT;
}
const struct brw_tracked_state brw_clip_unit = {
GLuint cache;
};
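(Annotation, not part of the patch: only the tail of struct brw_state_flags is visible in this hunk. A minimal sketch of the container the patch keeps, inferred from its usage in the hunks below and from the widths checked by the removed STATIC_ASSERTs; the real header uses GLuint and carries its own comments.)

   #include <stdint.h>

   struct brw_state_flags {
      uint32_t mesa;    /* _NEW_* flags signalled by Mesa core */
      uint64_t brw;     /* BRW_NEW_* flags signalled by the driver */
      uint32_t cache;   /* CACHE_NEW_* flags signalled by the program cache */
   };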
-
-/**
- * Enum representing the different pipelines.
- */
-typedef enum {
- /**
- * 3D rendering pipeline (vertex through fragment shader).
- */
- BRW_PIPELINE_3D,
-
- /**
- * Compute shader pipeline.
- */
- BRW_PIPELINE_COMPUTE,
-
- BRW_NUM_PIPELINES
-} brw_pipeline;
-
-
-/**
- * Set one of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_BIT(FIELD, FLAG) \
- do { \
- for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
- brw->state.pipeline_dirty[pipeline].FIELD |= (FLAG); \
- } while (false)
-
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_ALL(FIELD) \
- do { \
- /* ~0 == 0xffffffff, so make sure field is <= 32 bits */ \
- STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 4); \
- for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
- brw->state.pipeline_dirty[pipeline].FIELD = ~0; \
- } while (false)
-
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY64_ALL(FIELD) \
- do { \
- /* ~0ULL == 0xffffffffffffffff, so make sure field is <= 64 bits */ \
- STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 8); \
- for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
- brw->state.pipeline_dirty[pipeline].FIELD = ~(0ULL); \
- } while (false)
-
-
-/**
- * Check one of the bits in a field of brw_state_flags.
- */
-#define CHECK_DIRTY_BIT(FIELD, FLAG) \
- ((brw->state.pipeline_dirty[brw->state.current_pipeline].FIELD & (FLAG)) \
- != 0)
-
-
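(Annotation: the removed macros above fanned each write out to every pipeline's copy of the flags. With a single dirty set, the rest of this patch writes the field directly. A sketch of the two patterns, using a flag that appears elsewhere in the patch:)

   /* Before: one brw_state_flags per pipeline, written through the macro. */
   for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++)
      brw->state.pipeline_dirty[pipeline].brw |= BRW_NEW_VERTEX_PROGRAM;

   /* After: a single brw_state_flags, written in place. */
   brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;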
/** Subclass of Mesa vertex program */
struct brw_vertex_program {
struct gl_vertex_program program;
GLuint NewGLState;
struct {
- struct brw_state_flags pipeline_dirty[BRW_NUM_PIPELINES];
- brw_pipeline current_pipeline;
+ struct brw_state_flags dirty;
} state;
struct brw_cache cache;
int entries_per_oa_snapshot;
} perfmon;
- int num_atoms[BRW_NUM_PIPELINES];
- const struct brw_tracked_state **atoms[BRW_NUM_PIPELINES];
+ int num_atoms;
+ const struct brw_tracked_state **atoms;
/* If (INTEL_DEBUG & DEBUG_BATCH) */
struct {
brw->curbe.vs_start,
brw->curbe.vs_size );
- SET_DIRTY_BIT(brw, BRW_NEW_CURBE_OFFSETS);
+ brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
}
}
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+ brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
if (reduced_prim[prim->mode] != brw->reduced_primitive) {
brw->reduced_primitive = reduced_prim[prim->mode];
- SET_DIRTY_BIT(brw, BRW_NEW_REDUCED_PRIMITIVE);
+ brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
}
}
}
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+ brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
}
}
brw_merge_inputs( brw, arrays );
brw->ib.ib = ib;
- SET_DIRTY_BIT(brw, BRW_NEW_INDICES);
+ brw->state.dirty.brw |= BRW_NEW_INDICES;
brw->vb.min_index = min_index;
brw->vb.max_index = max_index;
- SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
for (i = 0; i < nr_prims; i++) {
int estimated_max_prim_size;
brw->num_instances = prims[i].num_instances;
brw->basevertex = prims[i].basevertex;
if (i > 0) { /* For i == 0 we just did this before the loop */
- SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
brw_merge_inputs(brw, arrays);
}
}
* *_set_prim or intel_batchbuffer_flush(), which only impacts
* brw->state.dirty.brw.
*/
- if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw) {
+ if (brw->state.dirty.brw) {
brw->no_batch_wrap = true;
- brw_upload_state(brw, BRW_PIPELINE_3D);
+ brw_upload_state(brw);
}
brw_emit_prim(brw, &prims[i], brw->primitive);
/* Now that we know we haven't run out of aperture space, we can safely
* reset the dirty bits.
*/
- if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw)
- brw_clear_dirty_bits(brw, BRW_PIPELINE_3D);
+ if (brw->state.dirty.brw)
+ brw_clear_dirty_bits(brw);
}
if (brw->always_flush_batch)
brw->ib.start_vertex_offset = offset / ib_type_size;
if (brw->ib.bo != old_bo)
- SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+ brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
if (index_buffer->type != brw->ib.type) {
brw->ib.type = index_buffer->type;
- SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+ brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
}
}
populate_key(brw, &key);
if (brw->ff_gs.prog_active != key.need_gs_prog) {
- SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_PROG);
+ brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
brw->ff_gs.prog_active = key.need_gs_prog;
}
gs->gs6.max_vp_index = brw->ctx.Const.MaxViewports - 1;
- SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_FF_GS_UNIT;
}
const struct brw_tracked_state brw_gs_unit = {
memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode));
- SET_DIRTY_BIT(brw, BRW_NEW_INTERPOLATION_MAP);
+ brw->state.dirty.brw |= BRW_NEW_INTERPOLATION_MAP;
if (!fprog)
return;
* 3DSTATE_PS.
*/
brw->wm.fast_clear_op = op;
- SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
}
static void
* _NEW_BUFFERS to make sure we emit new SURFACE_STATE with the new fast
* clear color value.
*/
- SET_DIRTY_BIT(mesa, _NEW_LIGHT | _NEW_BUFFERS);
- SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+ brw->state.dirty.mesa |= _NEW_LIGHT | _NEW_BUFFERS;
+ brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
}
bool
* color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
* we render to it.
*/
- SET_DIRTY_BIT(mesa, _NEW_BUFFERS);
+ brw->state.dirty.mesa |= _NEW_BUFFERS;
+
/* Set the custom state back to normal and dirty the same bits as above */
use_rectlist(brw, false);
brw->cc.state_offset);
ADVANCE_BATCH();
- SET_DIRTY_BIT(brw, BRW_NEW_PSP);
+ brw->state.dirty.brw |= BRW_NEW_PSP;
}
static void upload_psp_urb_cbs(struct brw_context *brw )
* obvious.
*/
- SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+ brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
}
const struct brw_tracked_state brw_state_base_address = {
switch (target) {
case GL_VERTEX_PROGRAM_ARB:
- SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
break;
case MESA_GEOMETRY_PROGRAM:
- SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
break;
case GL_FRAGMENT_PROGRAM_ARB:
- SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
break;
}
}
brw_fragment_program_const(brw->fragment_program);
if (newFP == curFP)
- SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
newFP->id = get_new_program_id(brw->intelScreen);
break;
}
brw_vertex_program_const(brw->vertex_program);
if (newVP == curVP)
- SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
if (newVP->program.IsPositionInvariant) {
_mesa_insert_mvp_code(ctx, &newVP->program);
}
* so turn them on now.
*/
brw->stats_wm++;
- SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+ brw->state.dirty.brw |= BRW_NEW_STATS_WM;
break;
default:
brw->query.obj = NULL;
brw->stats_wm--;
- SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+ brw->state.dirty.brw |= BRW_NEW_STATS_WM;
break;
default:
/* Flag that the sampler state table pointer has changed; later atoms
* will handle it.
*/
- SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
+ brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
}
}
sfv->scissor.ymax = ctx->DrawBuffer->Height - ctx->DrawBuffer->_Ymin - 1;
}
- SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+ brw->state.dirty.cache |= CACHE_NEW_SF_VP;
}
const struct brw_tracked_state brw_sf_vp = {
(sf->sf5.viewport_transform << 1)),
I915_GEM_DOMAIN_INSTRUCTION, 0);
- SET_DIRTY_BIT(cache, CACHE_NEW_SF_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_SF_UNIT;
}
const struct brw_tracked_state brw_sf_unit = {
/***********************************************************************
* brw_state.c
*/
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline);
-void brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline);
+void brw_upload_state(struct brw_context *brw);
+void brw_clear_dirty_bits(struct brw_context *brw);
void brw_init_state(struct brw_context *brw);
void brw_destroy_state(struct brw_context *brw);
*(void **)out_aux = ((char *)item->key + item->key_size);
if (item->offset != *inout_offset) {
- SET_DIRTY_BIT(cache, 1 << cache_id);
+ brw->state.dirty.cache |= (1 << cache_id);
*inout_offset = item->offset;
}
/* Since we have a new BO in place, we need to signal the units
* that depend on it (state base address on gen5+, or unit state before).
*/
- SET_DIRTY_BIT(brw, BRW_NEW_PROGRAM_CACHE);
+ brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
}
/**
uint32_t *out_offset,
void *out_aux)
{
- struct brw_context *brw = cache->brw;
struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
GLuint hash;
void *tmp;
*out_offset = item->offset;
*(void **)out_aux = (void *)((char *)item->key + item->key_size);
- SET_DIRTY_BIT(cache, 1 << cache_id);
+ cache->brw->state.dirty.cache |= 1 << cache_id;
}
void
/* We need to make sure that the programs get regenerated, since
* any offsets leftover in brw_context will no longer be valid.
*/
- SET_DIRTY_ALL(mesa);
- SET_DIRTY64_ALL(brw);
- SET_DIRTY_ALL(cache);
+ brw->state.dirty.mesa |= ~0;
+ brw->state.dirty.brw |= ~0;
+ brw->state.dirty.cache |= ~0;
intel_batchbuffer_flush(brw);
}
&haswell_cut_index,
};
-static const struct brw_tracked_state *gen7_compute_atoms[] =
-{
-};
-
-
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
void brw_init_state( struct brw_context *brw )
{
struct gl_context *ctx = &brw->ctx;
- int i, j;
+ const struct brw_tracked_state **atoms;
+ int num_atoms;
brw_init_caches(brw);
- memset(brw->atoms, 0, sizeof(brw->atoms));
- memset(brw->num_atoms, 0, sizeof(brw->num_atoms));
-
if (brw->gen >= 8) {
- brw->atoms[BRW_PIPELINE_3D] = gen8_atoms;
- brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen8_atoms);
+ atoms = gen8_atoms;
+ num_atoms = ARRAY_SIZE(gen8_atoms);
} else if (brw->gen == 7) {
- brw->atoms[BRW_PIPELINE_3D] = gen7_atoms;
- brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen7_atoms);
- brw->atoms[BRW_PIPELINE_COMPUTE] = gen7_compute_atoms;
- brw->num_atoms[BRW_PIPELINE_COMPUTE] = ARRAY_SIZE(gen7_compute_atoms);
+ atoms = gen7_atoms;
+ num_atoms = ARRAY_SIZE(gen7_atoms);
} else if (brw->gen == 6) {
- brw->atoms[BRW_PIPELINE_3D] = gen6_atoms;
- brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen6_atoms);
+ atoms = gen6_atoms;
+ num_atoms = ARRAY_SIZE(gen6_atoms);
} else {
- brw->atoms[BRW_PIPELINE_3D] = gen4_atoms;
- brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen4_atoms);
+ atoms = gen4_atoms;
+ num_atoms = ARRAY_SIZE(gen4_atoms);
}
- for (i = 0; i < BRW_NUM_PIPELINES; i++) {
- for (j = 0; j < brw->num_atoms[i]; j++) {
- assert(brw->atoms[i][j]->dirty.mesa |
- brw->atoms[i][j]->dirty.brw |
- brw->atoms[i][j]->dirty.cache);
- assert(brw->atoms[i][j]->emit);
- }
+ brw->atoms = atoms;
+ brw->num_atoms = num_atoms;
+
+ while (num_atoms--) {
+ assert((*atoms)->dirty.mesa |
+ (*atoms)->dirty.brw |
+ (*atoms)->dirty.cache);
+ assert((*atoms)->emit);
+ atoms++;
}
brw_upload_initial_gpu_state(brw);
- SET_DIRTY_ALL(mesa);
- SET_DIRTY64_ALL(brw);
+ brw->state.dirty.mesa = ~0;
+ brw->state.dirty.brw = ~0;
/* Make sure that brw->state.dirty.brw has enough bits to hold all possible
* dirty flags.
*/
- STATIC_ASSERT(BRW_NUM_STATE_BITS <=
- 8 * sizeof(brw->state.pipeline_dirty[0].brw));
+ STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->state.dirty.brw));
ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
/***********************************************************************
* Emit all state:
*/
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
+void brw_upload_state(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+ struct brw_state_flags *state = &brw->state.dirty;
int i;
static int dirty_count = 0;
- assert(0 <= pipeline && pipeline < BRW_NUM_PIPELINES);
- brw->state.current_pipeline = pipeline;
-
- SET_DIRTY_BIT(mesa, brw->NewGLState);
+ state->mesa |= brw->NewGLState;
brw->NewGLState = 0;
- SET_DIRTY_BIT(brw, ctx->NewDriverState);
+ state->brw |= ctx->NewDriverState;
ctx->NewDriverState = 0;
if (0) {
if (brw->fragment_program != ctx->FragmentProgram._Current) {
brw->fragment_program = ctx->FragmentProgram._Current;
- SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
}
if (brw->geometry_program != ctx->GeometryProgram._Current) {
brw->geometry_program = ctx->GeometryProgram._Current;
- SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
}
if (brw->vertex_program != ctx->VertexProgram._Current) {
brw->vertex_program = ctx->VertexProgram._Current;
- SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+ brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
}
if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
brw->meta_in_progress = _mesa_meta_in_progress(ctx);
- SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
+ brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
}
if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
brw->num_samples = ctx->DrawBuffer->Visual.samples;
- SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
+ brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
}
if ((state->mesa | state->cache | state->brw) == 0)
memset(&examined, 0, sizeof(examined));
prev = *state;
- for (i = 0; i < brw->num_atoms[pipeline]; i++) {
- const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+ for (i = 0; i < brw->num_atoms; i++) {
+ const struct brw_tracked_state *atom = brw->atoms[i];
struct brw_state_flags generated;
if (check_state(state, &atom->dirty)) {
}
}
else {
- for (i = 0; i < brw->num_atoms[pipeline]; i++) {
- const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+ for (i = 0; i < brw->num_atoms; i++) {
+ const struct brw_tracked_state *atom = brw->atoms[i];
if (check_state(state, &atom->dirty)) {
atom->emit(brw);
* brw_upload_state() call.
*/
void
-brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline)
+brw_clear_dirty_bits(struct brw_context *brw)
{
- struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+ struct brw_state_flags *state = &brw->state.dirty;
memset(state, 0, sizeof(*state));
}
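(Annotation: check_state(), used in the atom loops above, is not shown in this excerpt. Presumably it is the usual overlap test between the accumulated dirty flags and an atom's dirty signature, roughly:)

   static inline bool
   check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
   {
      /* An atom needs to run if any bit it watches is currently dirty. */
      return ((a->mesa & b->mesa) |
              (a->brw & b->brw) |
              (a->cache & b->cache)) != 0;
   }

(So on the normal path brw_upload_state() walks brw->atoms once, emits every atom whose signature overlaps the dirty set, and the caller wipes the set with brw_clear_dirty_bits() once the batch is known to fit, as in the draw loop earlier in the patch.)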
brw->urb.cs_start,
brw->urb.size);
- SET_DIRTY_BIT(brw, BRW_NEW_URB_FENCE);
+ brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
}
}
if (gp == NULL) {
/* No geometry shader. Vertex data just passes straight through. */
- if (CHECK_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS)) {
+ if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
brw->vue_map_geom_out = brw->vue_map_vs;
- SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+ brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
}
/* Other state atoms had better not try to access prog_data, since
if (memcmp(&brw->gs.prog_data->base.vue_map, &brw->vue_map_geom_out,
           sizeof(brw->vue_map_geom_out)) != 0) {
brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
- SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+ brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
}
}
if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_vs,
           sizeof(brw->vue_map_vs)) != 0) {
brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
- SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS);
+ brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
if (brw->gen < 7) {
/* No geometry shader support, so the VS VUE map is the VUE map for
* the output of the "geometry" portion of the pipeline.
*/
brw->vue_map_geom_out = brw->vue_map_vs;
- SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+ brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
}
}
}
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
- SET_DIRTY_BIT(cache, CACHE_NEW_VS_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_VS_UNIT;
}
const struct brw_tracked_state brw_vs_unit = {
if (!prog_data->nr_pull_params) {
if (stage_state->surf_offset[surf_index]) {
stage_state->surf_offset[surf_index] = 0;
- SET_DIRTY_BIT(brw, brw_new_constbuf);
+ brw->state.dirty.brw |= brw_new_constbuf;
}
return;
}
dword_pitch);
drm_intel_bo_unreference(const_bo);
- SET_DIRTY_BIT(brw, brw_new_constbuf);
+ brw->state.dirty.brw |= brw_new_constbuf;
}
I915_GEM_DOMAIN_INSTRUCTION, 0);
}
- SET_DIRTY_BIT(cache, CACHE_NEW_WM_UNIT);
+ brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
}
const struct brw_tracked_state brw_wm_unit = {
} else {
brw->vtbl.update_null_renderbuffer_surface(brw, 0);
}
- SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state brw_renderbuffer_surfaces = {
update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
}
- SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state brw_texture_surfaces = {
}
if (shader->NumUniformBlocks)
- SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
static void
}
if (prog->NumUniformBlocks)
- SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
static void
}
}
- SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+ brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
const struct brw_tracked_state gen6_sol_surface = {
/* Skip making a binding table if we don't have anything to put in it. */
if (!has_surfaces) {
if (brw->ff_gs.bind_bo_offset != 0) {
- SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+ brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
brw->ff_gs.bind_bo_offset = 0;
}
return;
/* BRW_NEW_SURFACES */
memcpy(bind, brw->ff_gs.surf_offset, BRW_MAX_GEN6_GS_SURFACES * sizeof(uint32_t));
- SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+ brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
}
const struct brw_tracked_state gen6_gs_binding_table = {
vp->ymin = -gby;
vp->ymax = gby;
- SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_VP);
+ brw->state.dirty.cache |= CACHE_NEW_CLIP_VP;
}
const struct brw_tracked_state gen6_clip_vp = {
sfv->viewport.m31 = v[MAT_TY] * y_scale + y_bias;
sfv->viewport.m32 = v[MAT_TZ] * depth_scale;
- SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+ brw->state.dirty.cache |= CACHE_NEW_SF_VP;
}
const struct brw_tracked_state gen6_sf_vp = {
* Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
* commands.
*/
- SET_DIRTY_BIT(brw, BRW_NEW_PUSH_CONSTANT_ALLOCATION);
+ brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
}
void
*
* Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
*/
- SET_DIRTY_BIT(mesa, _NEW_DEPTH | _NEW_BUFFERS);
+ brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
}
OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
ADVANCE_BATCH();
- SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+ brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
}
const struct brw_tracked_state gen8_state_base_address = {
* purposes means everything).
*/
if (brw->hw_ctx == NULL)
- SET_DIRTY_BIT(brw, BRW_NEW_CONTEXT);
+ brw->state.dirty.brw |= BRW_NEW_CONTEXT;
- SET_DIRTY_BIT(brw, BRW_NEW_BATCH);
+ brw->state.dirty.brw |= BRW_NEW_BATCH;
/* Assume that the last command before the start of our batch was a
* primitive, for safety.
/* the buffer might be bound as a uniform buffer, need to update it
*/
- SET_DIRTY_BIT(brw, BRW_NEW_UNIFORM_BUFFER);
+ brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
intel_bufferobj_mark_inactive(intel_obj);
}