prog_data->binding_table.size_bytes);
}
- brw->state.dirty.brw |= brw_new_binding_table;
+ SET_DIRTY_BIT(brw, brw_new_binding_table);
if (brw->gen >= 7) {
BEGIN_BATCH(2);
}
}
- brw->state.dirty.cache |= CACHE_NEW_CC_VP;
+ SET_DIRTY_BIT(cache, CACHE_NEW_CC_VP);
}
const struct brw_tracked_state brw_cc_vp = {
cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset64 +
brw->cc.vp_offset) >> 5; /* reloc */
- brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_CC_UNIT);
/* Emit CC viewport relocation */
drm_intel_bo_emit_reloc(brw->batch.bo,
clip->viewport_ymin = -1;
clip->viewport_ymax = 1;
- brw->state.dirty.cache |= CACHE_NEW_CLIP_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_UNIT);
}
const struct brw_tracked_state brw_clip_unit = {
GLuint cache;
};
+
+/**
+ * Set one or more bits in a field of brw_state_flags.  Assumes a
+ * "struct brw_context *brw" is in scope at the point of use.
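+ *
+ * Usage sketch:
+ *
+ *    SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+ *       expands to: brw->state.dirty.brw |= (BRW_NEW_VERTEX_PROGRAM);
+ *    SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
+ *       expands to: brw->state.dirty.cache |= (CACHE_NEW_SAMPLER);
+ *
+ * The first argument names a field of brw_state_flags (mesa, brw or
+ * cache), not a variable; both stores go through the implicit "brw"
+ * pointer.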
+ */
+#define SET_DIRTY_BIT(FIELD, FLAG) brw->state.dirty.FIELD |= (FLAG)
+
+
/** Subclass of Mesa vertex program */
struct brw_vertex_program {
struct gl_vertex_program program;
brw->curbe.vs_start,
brw->curbe.vs_size );
- brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
+ SET_DIRTY_BIT(brw, BRW_NEW_CURBE_OFFSETS);
}
}
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+ SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
if (reduced_prim[prim->mode] != brw->reduced_primitive) {
brw->reduced_primitive = reduced_prim[prim->mode];
- brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+ SET_DIRTY_BIT(brw, BRW_NEW_REDUCED_PRIMITIVE);
}
}
}
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+ SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
}
}
brw_merge_inputs( brw, arrays );
brw->ib.ib = ib;
- brw->state.dirty.brw |= BRW_NEW_INDICES;
+ SET_DIRTY_BIT(brw, BRW_NEW_INDICES);
brw->vb.min_index = min_index;
brw->vb.max_index = max_index;
- brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
for (i = 0; i < nr_prims; i++) {
int estimated_max_prim_size;
brw->num_instances = prims[i].num_instances;
brw->basevertex = prims[i].basevertex;
if (i > 0) { /* For i == 0 we just did this before the loop */
- brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
brw_merge_inputs(brw, arrays);
}
}
brw->ib.start_vertex_offset = offset / ib_type_size;
if (brw->ib.bo != old_bo)
- brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+ SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
if (index_buffer->type != brw->ib.type) {
brw->ib.type = index_buffer->type;
- brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+ SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
}
}
populate_key(brw, &key);
if (brw->ff_gs.prog_active != key.need_gs_prog) {
- brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
+ SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_PROG);
brw->ff_gs.prog_active = key.need_gs_prog;
}
gs->gs6.max_vp_index = brw->ctx.Const.MaxViewports - 1;
- brw->state.dirty.cache |= CACHE_NEW_FF_GS_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_UNIT);
}
const struct brw_tracked_state brw_gs_unit = {
memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode));
- brw->state.dirty.brw |= BRW_NEW_INTERPOLATION_MAP;
+ SET_DIRTY_BIT(brw, BRW_NEW_INTERPOLATION_MAP);
if (!fprog)
return;
* 3DSTATE_PS.
*/
brw->wm.fast_clear_op = op;
- brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
}
static void
* _NEW_BUFFERS to make sure we emit new SURFACE_STATE with the new fast
* clear color value.
*/
- brw->state.dirty.mesa |= _NEW_LIGHT | _NEW_BUFFERS;
- brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+ SET_DIRTY_BIT(mesa, _NEW_LIGHT | _NEW_BUFFERS);
+ SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
}
bool
* color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
* we render to it.
*/
- brw->state.dirty.mesa |= _NEW_BUFFERS;
-
+ SET_DIRTY_BIT(mesa, _NEW_BUFFERS);
/* Set the custom state back to normal and dirty the same bits as above */
use_rectlist(brw, false);
brw->cc.state_offset);
ADVANCE_BATCH();
- brw->state.dirty.brw |= BRW_NEW_PSP;
+ SET_DIRTY_BIT(brw, BRW_NEW_PSP);
}
static void upload_psp_urb_cbs(struct brw_context *brw )
* obvious.
*/
- brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
+ SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
}
const struct brw_tracked_state brw_state_base_address = {
switch (target) {
case GL_VERTEX_PROGRAM_ARB:
- brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
break;
case MESA_GEOMETRY_PROGRAM:
- brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
break;
case GL_FRAGMENT_PROGRAM_ARB:
- brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
break;
}
}
brw_fragment_program_const(brw->fragment_program);
if (newFP == curFP)
- brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
newFP->id = get_new_program_id(brw->intelScreen);
break;
}
brw_vertex_program_const(brw->vertex_program);
if (newVP == curVP)
- brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
if (newVP->program.IsPositionInvariant) {
_mesa_insert_mvp_code(ctx, &newVP->program);
}
* so turn them on now.
*/
brw->stats_wm++;
- brw->state.dirty.brw |= BRW_NEW_STATS_WM;
+ SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
break;
default:
brw->query.obj = NULL;
brw->stats_wm--;
- brw->state.dirty.brw |= BRW_NEW_STATS_WM;
+ SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
break;
default:
/* Flag that the sampler state table pointer has changed; later atoms
* will handle it.
*/
- brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
+ SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
}
}
sfv->scissor.ymax = ctx->DrawBuffer->Height - ctx->DrawBuffer->_Ymin - 1;
}
- brw->state.dirty.cache |= CACHE_NEW_SF_VP;
+ SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
}
const struct brw_tracked_state brw_sf_vp = {
(sf->sf5.viewport_transform << 1)),
I915_GEM_DOMAIN_INSTRUCTION, 0);
- brw->state.dirty.cache |= CACHE_NEW_SF_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_SF_UNIT);
}
const struct brw_tracked_state brw_sf_unit = {
*(void **)out_aux = ((char *)item->key + item->key_size);
if (item->offset != *inout_offset) {
- brw->state.dirty.cache |= (1 << cache_id);
+ SET_DIRTY_BIT(cache, 1 << cache_id);
*inout_offset = item->offset;
}
/* Since we have a new BO in place, we need to signal the units
* that depend on it (state base address on gen5+, or unit state before).
*/
- brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
+ SET_DIRTY_BIT(brw, BRW_NEW_PROGRAM_CACHE);
}
/**
uint32_t *out_offset,
void *out_aux)
{
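+   /* SET_DIRTY_BIT() below expects a local "brw"; get it from the cache. */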
+ struct brw_context *brw = cache->brw;
struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
GLuint hash;
void *tmp;
*out_offset = item->offset;
*(void **)out_aux = (void *)((char *)item->key + item->key_size);
- cache->brw->state.dirty.cache |= 1 << cache_id;
+ SET_DIRTY_BIT(cache, 1 << cache_id);
}
void
int i;
static int dirty_count = 0;
- state->mesa |= brw->NewGLState;
+ SET_DIRTY_BIT(mesa, brw->NewGLState);
brw->NewGLState = 0;
- state->brw |= ctx->NewDriverState;
+ SET_DIRTY_BIT(brw, ctx->NewDriverState);
ctx->NewDriverState = 0;
if (0) {
if (brw->fragment_program != ctx->FragmentProgram._Current) {
brw->fragment_program = ctx->FragmentProgram._Current;
- brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
}
if (brw->geometry_program != ctx->GeometryProgram._Current) {
brw->geometry_program = ctx->GeometryProgram._Current;
- brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
}
if (brw->vertex_program != ctx->VertexProgram._Current) {
brw->vertex_program = ctx->VertexProgram._Current;
- brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+ SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
}
if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
brw->meta_in_progress = _mesa_meta_in_progress(ctx);
- brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
+ SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
}
if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
brw->num_samples = ctx->DrawBuffer->Visual.samples;
- brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
+ SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
}
if ((state->mesa | state->cache | state->brw) == 0)
brw->urb.cs_start,
brw->urb.size);
- brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
+ SET_DIRTY_BIT(brw, BRW_NEW_URB_FENCE);
}
}
/* No geometry shader. Vertex data just passes straight through. */
if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
brw->vue_map_geom_out = brw->vue_map_vs;
- brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+ SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
}
/* Other state atoms had better not try to access prog_data, since
if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
sizeof(brw->vue_map_geom_out)) != 0) {
brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
- brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+ SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
}
}
if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
sizeof(brw->vue_map_geom_out)) != 0) {
brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
- brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
+ SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS);
if (brw->gen < 7) {
/* No geometry shader support, so the VS VUE map is the VUE map for
* the output of the "geometry" portion of the pipeline.
*/
brw->vue_map_geom_out = brw->vue_map_vs;
- brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+ SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
}
}
}
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
- brw->state.dirty.cache |= CACHE_NEW_VS_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_VS_UNIT);
}
const struct brw_tracked_state brw_vs_unit = {
if (!prog_data->nr_pull_params) {
if (stage_state->surf_offset[surf_index]) {
stage_state->surf_offset[surf_index] = 0;
- brw->state.dirty.brw |= brw_new_constbuf;
+ SET_DIRTY_BIT(brw, brw_new_constbuf);
}
return;
}
dword_pitch);
drm_intel_bo_unreference(const_bo);
- brw->state.dirty.brw |= brw_new_constbuf;
+ SET_DIRTY_BIT(brw, brw_new_constbuf);
}
I915_GEM_DOMAIN_INSTRUCTION, 0);
}
- brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
+ SET_DIRTY_BIT(cache, CACHE_NEW_WM_UNIT);
}
const struct brw_tracked_state brw_wm_unit = {
} else {
brw->vtbl.update_null_renderbuffer_surface(brw, 0);
}
- brw->state.dirty.brw |= BRW_NEW_SURFACES;
+ SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
}
const struct brw_tracked_state brw_renderbuffer_surfaces = {
update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
}
- brw->state.dirty.brw |= BRW_NEW_SURFACES;
+ SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
}
const struct brw_tracked_state brw_texture_surfaces = {
}
if (shader->NumUniformBlocks)
- brw->state.dirty.brw |= BRW_NEW_SURFACES;
+ SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
}
static void
}
if (prog->NumUniformBlocks)
- brw->state.dirty.brw |= BRW_NEW_SURFACES;
+ SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
}
static void
}
}
- brw->state.dirty.brw |= BRW_NEW_SURFACES;
+ SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
}
const struct brw_tracked_state gen6_sol_surface = {
/* Skip making a binding table if we don't have anything to put in it. */
if (!has_surfaces) {
if (brw->ff_gs.bind_bo_offset != 0) {
- brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
+ SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
brw->ff_gs.bind_bo_offset = 0;
}
return;
/* BRW_NEW_SURFACES */
memcpy(bind, brw->ff_gs.surf_offset, BRW_MAX_GEN6_GS_SURFACES * sizeof(uint32_t));
- brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
+ SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
}
const struct brw_tracked_state gen6_gs_binding_table = {
vp->ymin = -gby;
vp->ymax = gby;
- brw->state.dirty.cache |= CACHE_NEW_CLIP_VP;
+ SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_VP);
}
const struct brw_tracked_state gen6_clip_vp = {
sfv->viewport.m31 = v[MAT_TY] * y_scale + y_bias;
sfv->viewport.m32 = v[MAT_TZ] * depth_scale;
- brw->state.dirty.cache |= CACHE_NEW_SF_VP;
+ SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
}
const struct brw_tracked_state gen6_sf_vp = {
* Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
* commands.
*/
- brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
+ SET_DIRTY_BIT(brw, BRW_NEW_PUSH_CONSTANT_ALLOCATION);
}
void
*
* Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
*/
- brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
+ SET_DIRTY_BIT(mesa, _NEW_DEPTH | _NEW_BUFFERS);
}
OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
ADVANCE_BATCH();
- brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
+ SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
}
const struct brw_tracked_state gen8_state_base_address = {
* purposes means everything).
*/
if (brw->hw_ctx == NULL)
- brw->state.dirty.brw |= BRW_NEW_CONTEXT;
+ SET_DIRTY_BIT(brw, BRW_NEW_CONTEXT);
- brw->state.dirty.brw |= BRW_NEW_BATCH;
+ SET_DIRTY_BIT(brw, BRW_NEW_BATCH);
/* Assume that the last command before the start of our batch was a
* primitive, for safety.
/* the buffer might be bound as a uniform buffer, need to update it
*/
- brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
+ SET_DIRTY_BIT(brw, BRW_NEW_UNIFORM_BUFFER);
intel_bufferobj_mark_inactive(intel_obj);
}