* We do this here so that iris_binder_reserve_3d correctly gets a new
* larger total_size when making the updated reservation.
*/
- ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
}
static uint32_t
unsigned total_size;
/* If nothing is dirty, skip all this. */
- if (!(ice->state.dirty & IRIS_ALL_DIRTY_BINDINGS))
+ if (!(ice->state.dirty & IRIS_DIRTY_RENDER_BUFFER) &&
+ !(ice->state.stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS))
return;
/* Get the binding table sizes for each stage */
while (true) {
total_size = 0;
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage))
+ if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage))
total_size += sizes[stage];
}
uint32_t offset = binder_insert(binder, total_size);
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
+ if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
binder->bt_offset[stage] = sizes[stage] > 0 ? offset : 0;
iris_record_state_size(ice->state.sizes,
binder->bo->gtt_offset + offset, sizes[stage]);
void
iris_binder_reserve_compute(struct iris_context *ice)
{
- if (!(ice->state.dirty & IRIS_DIRTY_BINDINGS_CS))
+ if (!(ice->state.stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS))
return;
struct iris_binder *binder = &ice->state.binder;
IRIS_DIRTY_LINE_STIPPLE |
IRIS_ALL_DIRTY_FOR_COMPUTE |
IRIS_DIRTY_SCISSOR_RECT |
- IRIS_DIRTY_UNCOMPILED_VS |
- IRIS_DIRTY_UNCOMPILED_TCS |
- IRIS_DIRTY_UNCOMPILED_TES |
- IRIS_DIRTY_UNCOMPILED_GS |
- IRIS_DIRTY_UNCOMPILED_FS |
IRIS_DIRTY_VF |
- IRIS_DIRTY_SF_CL_VIEWPORT |
- IRIS_DIRTY_SAMPLER_STATES_VS |
- IRIS_DIRTY_SAMPLER_STATES_TCS |
- IRIS_DIRTY_SAMPLER_STATES_TES |
- IRIS_DIRTY_SAMPLER_STATES_GS);
+ IRIS_DIRTY_SF_CL_VIEWPORT);
+ uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
+ IRIS_STAGE_DIRTY_UNCOMPILED_VS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TES |
+ IRIS_STAGE_DIRTY_UNCOMPILED_GS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_FS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);
if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
/* BLORP disabled tessellation, that's fine for the next draw */
- skip_bits |= IRIS_DIRTY_TCS |
- IRIS_DIRTY_TES |
- IRIS_DIRTY_CONSTANTS_TCS |
- IRIS_DIRTY_CONSTANTS_TES |
- IRIS_DIRTY_BINDINGS_TCS |
- IRIS_DIRTY_BINDINGS_TES;
+ skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_CONSTANTS_TCS |
+ IRIS_STAGE_DIRTY_CONSTANTS_TES |
+ IRIS_STAGE_DIRTY_BINDINGS_TCS |
+ IRIS_STAGE_DIRTY_BINDINGS_TES;
}
if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
/* BLORP disabled geometry shaders, that's fine for the next draw */
- skip_bits |= IRIS_DIRTY_GS |
- IRIS_DIRTY_CONSTANTS_GS |
- IRIS_DIRTY_BINDINGS_GS;
+ skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_CONSTANTS_GS |
+ IRIS_STAGE_DIRTY_BINDINGS_GS;
}
/* we can skip flagging IRIS_DIRTY_DEPTH_BUFFER, if
skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;
ice->state.dirty |= ~skip_bits;
+ ice->state.stage_dirty |= ~skip_stage_bits;
if (params->dst.enabled) {
iris_render_cache_add_bo(batch, params->dst.addr.buffer,
iris_resource_set_aux_state(ice, res, level, box->z,
box->depth, ISL_AUX_STATE_CLEAR);
- ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
return;
}
}
ice->state.dirty = ~0ull;
+ ice->state.stage_dirty = ~0ull;
ice->state.current_hash_scale = 0;
memset(ice->state.last_block, 0, sizeof(ice->state.last_block));
memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
*
* See iris_upload_render_state().
*/
-#define IRIS_DIRTY_COLOR_CALC_STATE (1ull << 0)
-#define IRIS_DIRTY_POLYGON_STIPPLE (1ull << 1)
-#define IRIS_DIRTY_SCISSOR_RECT (1ull << 2)
-#define IRIS_DIRTY_WM_DEPTH_STENCIL (1ull << 3)
-#define IRIS_DIRTY_CC_VIEWPORT (1ull << 4)
-#define IRIS_DIRTY_SF_CL_VIEWPORT (1ull << 5)
-#define IRIS_DIRTY_PS_BLEND (1ull << 6)
-#define IRIS_DIRTY_BLEND_STATE (1ull << 7)
-#define IRIS_DIRTY_RASTER (1ull << 8)
-#define IRIS_DIRTY_CLIP (1ull << 9)
-#define IRIS_DIRTY_SBE (1ull << 10)
-#define IRIS_DIRTY_LINE_STIPPLE (1ull << 11)
-#define IRIS_DIRTY_VERTEX_ELEMENTS (1ull << 12)
-#define IRIS_DIRTY_MULTISAMPLE (1ull << 13)
-#define IRIS_DIRTY_VERTEX_BUFFERS (1ull << 14)
-#define IRIS_DIRTY_SAMPLE_MASK (1ull << 15)
-#define IRIS_DIRTY_SAMPLER_STATES_VS (1ull << 16)
-#define IRIS_DIRTY_SAMPLER_STATES_TCS (1ull << 17)
-#define IRIS_DIRTY_SAMPLER_STATES_TES (1ull << 18)
-#define IRIS_DIRTY_SAMPLER_STATES_GS (1ull << 19)
-#define IRIS_DIRTY_SAMPLER_STATES_PS (1ull << 20)
-#define IRIS_DIRTY_SAMPLER_STATES_CS (1ull << 21)
-#define IRIS_DIRTY_UNCOMPILED_VS (1ull << 22)
-#define IRIS_DIRTY_UNCOMPILED_TCS (1ull << 23)
-#define IRIS_DIRTY_UNCOMPILED_TES (1ull << 24)
-#define IRIS_DIRTY_UNCOMPILED_GS (1ull << 25)
-#define IRIS_DIRTY_UNCOMPILED_FS (1ull << 26)
-#define IRIS_DIRTY_UNCOMPILED_CS (1ull << 27)
-#define IRIS_DIRTY_VS (1ull << 28)
-#define IRIS_DIRTY_TCS (1ull << 29)
-#define IRIS_DIRTY_TES (1ull << 30)
-#define IRIS_DIRTY_GS (1ull << 31)
-#define IRIS_DIRTY_FS (1ull << 32)
-#define IRIS_DIRTY_CS (1ull << 33)
-#define IRIS_DIRTY_URB (1ull << 34)
-#define IRIS_SHIFT_FOR_DIRTY_CONSTANTS 35
-#define IRIS_DIRTY_CONSTANTS_VS (1ull << 35)
-#define IRIS_DIRTY_CONSTANTS_TCS (1ull << 36)
-#define IRIS_DIRTY_CONSTANTS_TES (1ull << 37)
-#define IRIS_DIRTY_CONSTANTS_GS (1ull << 38)
-#define IRIS_DIRTY_CONSTANTS_FS (1ull << 39)
-#define IRIS_DIRTY_CONSTANTS_CS (1ull << 40)
-#define IRIS_DIRTY_DEPTH_BUFFER (1ull << 41)
-#define IRIS_DIRTY_WM (1ull << 42)
-#define IRIS_DIRTY_BINDINGS_VS (1ull << 43)
-#define IRIS_DIRTY_BINDINGS_TCS (1ull << 44)
-#define IRIS_DIRTY_BINDINGS_TES (1ull << 45)
-#define IRIS_DIRTY_BINDINGS_GS (1ull << 46)
-#define IRIS_DIRTY_BINDINGS_FS (1ull << 47)
-#define IRIS_DIRTY_BINDINGS_CS (1ull << 48)
-#define IRIS_DIRTY_SO_BUFFERS (1ull << 49)
-#define IRIS_DIRTY_SO_DECL_LIST (1ull << 50)
-#define IRIS_DIRTY_STREAMOUT (1ull << 51)
-#define IRIS_DIRTY_VF_SGVS (1ull << 52)
-#define IRIS_DIRTY_VF (1ull << 53)
-#define IRIS_DIRTY_VF_TOPOLOGY (1ull << 54)
-#define IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES (1ull << 55)
-#define IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES (1ull << 56)
-#define IRIS_DIRTY_VF_STATISTICS (1ull << 57)
-#define IRIS_DIRTY_PMA_FIX (1ull << 58)
-#define IRIS_DIRTY_DEPTH_BOUNDS (1ull << 59)
-#define IRIS_DIRTY_RENDER_BUFFER (1ull << 60)
-#define IRIS_DIRTY_STENCIL_REF (1ull << 61)
-
-#define IRIS_ALL_DIRTY_FOR_COMPUTE (IRIS_DIRTY_CS | \
- IRIS_DIRTY_SAMPLER_STATES_CS | \
- IRIS_DIRTY_UNCOMPILED_CS | \
- IRIS_DIRTY_CONSTANTS_CS | \
- IRIS_DIRTY_BINDINGS_CS | \
- IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES)
-
-#define IRIS_ALL_DIRTY_FOR_RENDER ~IRIS_ALL_DIRTY_FOR_COMPUTE
-
-#define IRIS_ALL_DIRTY_BINDINGS (IRIS_DIRTY_BINDINGS_VS | \
- IRIS_DIRTY_BINDINGS_TCS | \
- IRIS_DIRTY_BINDINGS_TES | \
- IRIS_DIRTY_BINDINGS_GS | \
- IRIS_DIRTY_BINDINGS_FS | \
- IRIS_DIRTY_BINDINGS_CS | \
- IRIS_DIRTY_RENDER_BUFFER)
+#define IRIS_DIRTY_COLOR_CALC_STATE (1ull << 0)
+#define IRIS_DIRTY_POLYGON_STIPPLE (1ull << 1)
+#define IRIS_DIRTY_SCISSOR_RECT (1ull << 2)
+#define IRIS_DIRTY_WM_DEPTH_STENCIL (1ull << 3)
+#define IRIS_DIRTY_CC_VIEWPORT (1ull << 4)
+#define IRIS_DIRTY_SF_CL_VIEWPORT (1ull << 5)
+#define IRIS_DIRTY_PS_BLEND (1ull << 6)
+#define IRIS_DIRTY_BLEND_STATE (1ull << 7)
+#define IRIS_DIRTY_RASTER (1ull << 8)
+#define IRIS_DIRTY_CLIP (1ull << 9)
+#define IRIS_DIRTY_SBE (1ull << 10)
+#define IRIS_DIRTY_LINE_STIPPLE (1ull << 11)
+#define IRIS_DIRTY_VERTEX_ELEMENTS (1ull << 12)
+#define IRIS_DIRTY_MULTISAMPLE (1ull << 13)
+#define IRIS_DIRTY_VERTEX_BUFFERS (1ull << 14)
+#define IRIS_DIRTY_SAMPLE_MASK (1ull << 15)
+#define IRIS_DIRTY_URB (1ull << 16)
+#define IRIS_DIRTY_DEPTH_BUFFER (1ull << 17)
+#define IRIS_DIRTY_WM (1ull << 18)
+#define IRIS_DIRTY_SO_BUFFERS (1ull << 19)
+#define IRIS_DIRTY_SO_DECL_LIST (1ull << 20)
+#define IRIS_DIRTY_STREAMOUT (1ull << 21)
+#define IRIS_DIRTY_VF_SGVS (1ull << 22)
+#define IRIS_DIRTY_VF (1ull << 23)
+#define IRIS_DIRTY_VF_TOPOLOGY (1ull << 24)
+#define IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES (1ull << 25)
+#define IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES (1ull << 26)
+#define IRIS_DIRTY_VF_STATISTICS (1ull << 27)
+#define IRIS_DIRTY_PMA_FIX (1ull << 28)
+#define IRIS_DIRTY_DEPTH_BOUNDS (1ull << 29)
+#define IRIS_DIRTY_RENDER_BUFFER (1ull << 30)
+#define IRIS_DIRTY_STENCIL_REF (1ull << 31)
+
+#define IRIS_ALL_DIRTY_FOR_COMPUTE (IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES)
+
+#define IRIS_ALL_DIRTY_FOR_RENDER (~IRIS_ALL_DIRTY_FOR_COMPUTE)
+
+/**
+ * Per-stage dirty flags. When state changes, we flag some combination of
+ * these to indicate that particular GPU commands need to be re-emitted.
+ * Unlike the IRIS_DIRTY_* flags these are shader stage-specific and can be
+ * indexed by shifting the mask by the shader stage index.
+ *
+ * See iris_upload_render_state().
+ */
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_VS (1ull << 0)
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS (1ull << 1)
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_TES (1ull << 2)
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_GS (1ull << 3)
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_PS (1ull << 4)
+#define IRIS_STAGE_DIRTY_SAMPLER_STATES_CS (1ull << 5)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_VS (1ull << 6)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_TCS (1ull << 7)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_TES (1ull << 8)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_GS (1ull << 9)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_FS (1ull << 10)
+#define IRIS_STAGE_DIRTY_UNCOMPILED_CS (1ull << 11)
+#define IRIS_STAGE_DIRTY_VS (1ull << 12)
+#define IRIS_STAGE_DIRTY_TCS (1ull << 13)
+#define IRIS_STAGE_DIRTY_TES (1ull << 14)
+#define IRIS_STAGE_DIRTY_GS (1ull << 15)
+#define IRIS_STAGE_DIRTY_FS (1ull << 16)
+#define IRIS_STAGE_DIRTY_CS (1ull << 17)
+#define IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS 18
+#define IRIS_STAGE_DIRTY_CONSTANTS_VS (1ull << 18)
+#define IRIS_STAGE_DIRTY_CONSTANTS_TCS (1ull << 19)
+#define IRIS_STAGE_DIRTY_CONSTANTS_TES (1ull << 20)
+#define IRIS_STAGE_DIRTY_CONSTANTS_GS (1ull << 21)
+#define IRIS_STAGE_DIRTY_CONSTANTS_FS (1ull << 22)
+#define IRIS_STAGE_DIRTY_CONSTANTS_CS (1ull << 23)
+#define IRIS_STAGE_DIRTY_BINDINGS_VS (1ull << 24)
+#define IRIS_STAGE_DIRTY_BINDINGS_TCS (1ull << 25)
+#define IRIS_STAGE_DIRTY_BINDINGS_TES (1ull << 26)
+#define IRIS_STAGE_DIRTY_BINDINGS_GS (1ull << 27)
+#define IRIS_STAGE_DIRTY_BINDINGS_FS (1ull << 28)
+#define IRIS_STAGE_DIRTY_BINDINGS_CS (1ull << 29)
+
+#define IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE (IRIS_STAGE_DIRTY_CS | \
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_CS | \
+ IRIS_STAGE_DIRTY_UNCOMPILED_CS | \
+ IRIS_STAGE_DIRTY_CONSTANTS_CS | \
+ IRIS_STAGE_DIRTY_BINDINGS_CS)
+
+#define IRIS_ALL_STAGE_DIRTY_FOR_RENDER (~IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE)
+
+#define IRIS_ALL_STAGE_DIRTY_BINDINGS (IRIS_STAGE_DIRTY_BINDINGS_VS | \
+ IRIS_STAGE_DIRTY_BINDINGS_TCS | \
+ IRIS_STAGE_DIRTY_BINDINGS_TES | \
+ IRIS_STAGE_DIRTY_BINDINGS_GS | \
+ IRIS_STAGE_DIRTY_BINDINGS_FS | \
+ IRIS_STAGE_DIRTY_BINDINGS_CS)
/**
* Non-orthogonal state (NOS) dependency flags.
struct {
uint64_t dirty;
- uint64_t dirty_for_nos[IRIS_NOS_COUNT];
+ uint64_t stage_dirty;
+ uint64_t stage_dirty_for_nos[IRIS_NOS_COUNT];
unsigned num_viewports;
unsigned sample_mask;
/* 8_PATCH TCS needs this for key->input_vertices */
if (compiler->use_tcs_8_patch)
- ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_TCS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_TCS;
/* Flag constants dirty for gl_PatchVerticesIn if needed. */
const struct shader_info *tcs_info =
iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
if (tcs_info &&
tcs_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
ice->state.shaders[MESA_SHADER_TESS_CTRL].sysvals_need_upload = true;
}
}
batch->screen->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
}
- uint64_t orig_dirty = ice->state.dirty;
+ const uint64_t orig_dirty = ice->state.dirty;
+ const uint64_t orig_stage_dirty = ice->state.stage_dirty;
for (int i = 0; i < info.indirect->draw_count; i++) {
info.drawid = i;
batch->screen->vtbl.upload_render_state(ice, batch, &info);
ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
+ ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
info.indirect->offset += info.indirect->stride;
}
/* Put this back for post-draw resolves, we'll clear it again after. */
ice->state.dirty = orig_dirty;
+ ice->state.stage_dirty = orig_stage_dirty;
}
static void
/* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
* write offsets, changing the behavior.
*/
- if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
+ if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER & ~IRIS_DIRTY_SO_BUFFERS;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
+ }
iris_update_draw_info(ice, info);
iris_postdraw_update_resolve_tracking(ice, batch);
ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
+ ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
}
static void
.stride_B = 1,
.mocs = iris_mocs(grid_bo, isl_dev));
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_CS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
}
void
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
return;
- if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
+ if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
+ }
/* We can't do resolves on the compute engine, so awkwardly, we have to
* do them on the render batch...
if (memcmp(ice->state.last_block, grid->block, sizeof(grid->block)) != 0) {
memcpy(ice->state.last_block, grid->block, sizeof(grid->block));
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_CS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_CS;
ice->state.shaders[MESA_SHADER_COMPUTE].sysvals_need_upload = true;
}
iris_handle_always_flush_cache(batch);
ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;
+ ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
/* Note: since compute shaders can't access the framebuffer, there's
* no need to call iris_postdraw_update_resolve_tracking.
if (old != shader) {
ice->shaders.prog[IRIS_CACHE_VS] = shader;
- ice->state.dirty |= IRIS_DIRTY_VS |
- IRIS_DIRTY_BINDINGS_VS |
- IRIS_DIRTY_CONSTANTS_VS |
- IRIS_DIRTY_VF_SGVS;
+ ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
+ IRIS_STAGE_DIRTY_BINDINGS_VS |
+ IRIS_STAGE_DIRTY_CONSTANTS_VS;
shs->sysvals_need_upload = true;
const struct brw_vs_prog_data *vs_prog_data =
if (old != shader) {
ice->shaders.prog[IRIS_CACHE_TCS] = shader;
- ice->state.dirty |= IRIS_DIRTY_TCS |
- IRIS_DIRTY_BINDINGS_TCS |
- IRIS_DIRTY_CONSTANTS_TCS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_BINDINGS_TCS |
+ IRIS_STAGE_DIRTY_CONSTANTS_TCS;
shs->sysvals_need_upload = true;
}
}
if (old != shader) {
ice->shaders.prog[IRIS_CACHE_TES] = shader;
- ice->state.dirty |= IRIS_DIRTY_TES |
- IRIS_DIRTY_BINDINGS_TES |
- IRIS_DIRTY_CONSTANTS_TES;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_BINDINGS_TES |
+ IRIS_STAGE_DIRTY_CONSTANTS_TES;
shs->sysvals_need_upload = true;
}
/* TODO: Could compare and avoid flagging this. */
const struct shader_info *tes_info = &ish->nir->info;
if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
}
}
if (old != shader) {
ice->shaders.prog[IRIS_CACHE_GS] = shader;
- ice->state.dirty |= IRIS_DIRTY_GS |
- IRIS_DIRTY_BINDINGS_GS |
- IRIS_DIRTY_CONSTANTS_GS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_BINDINGS_GS |
+ IRIS_STAGE_DIRTY_CONSTANTS_GS;
shs->sysvals_need_upload = true;
}
}
// XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
// toggles. might be able to avoid flagging SBE too.
ice->shaders.prog[IRIS_CACHE_FS] = shader;
- ice->state.dirty |= IRIS_DIRTY_FS |
- IRIS_DIRTY_BINDINGS_FS |
- IRIS_DIRTY_CONSTANTS_FS |
- IRIS_DIRTY_WM |
+ ice->state.dirty |= IRIS_DIRTY_WM |
IRIS_DIRTY_CLIP |
IRIS_DIRTY_SBE;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
+ IRIS_STAGE_DIRTY_BINDINGS_FS |
+ IRIS_STAGE_DIRTY_CONSTANTS_FS;
shs->sysvals_need_upload = true;
}
}
ice->state.dirty |= IRIS_DIRTY_CLIP |
IRIS_DIRTY_SF_CL_VIEWPORT |
IRIS_DIRTY_CC_VIEWPORT |
- IRIS_DIRTY_SCISSOR_RECT |
- IRIS_DIRTY_UNCOMPILED_FS |
- ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
+ IRIS_DIRTY_SCISSOR_RECT;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
+ ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
}
if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
}
if (any_new_descriptors)
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
}
/**
iris_update_compiled_shaders(struct iris_context *ice)
{
const uint64_t dirty = ice->state.dirty;
+ const uint64_t stage_dirty = ice->state.stage_dirty;
struct brw_vue_prog_data *old_prog_datas[4];
if (!(dirty & IRIS_DIRTY_URB)) {
old_prog_datas[i] = get_vue_prog_data(ice, i);
}
- if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
+ if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
struct iris_uncompiled_shader *tes =
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
if (tes) {
} else {
ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
ice->shaders.prog[IRIS_CACHE_TES] = NULL;
- ice->state.dirty |=
- IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
- IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
- IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
+ ice->state.stage_dirty |=
+ IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
+ IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
}
}
- if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
+ if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
iris_update_compiled_vs(ice);
- if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
+ if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
iris_update_compiled_gs(ice);
- if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
+ if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
const struct iris_compiled_shader *gs =
ice->shaders.prog[MESA_SHADER_GEOMETRY];
const struct iris_compiled_shader *tes =
}
}
- if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
+ if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
iris_update_compiled_fs(ice);
/* Changing shader interfaces may require a URB configuration. */
}
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
- if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
+ if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
iris_update_pull_constant_descriptors(ice, i);
}
}
if (old != shader) {
ice->shaders.prog[IRIS_CACHE_CS] = shader;
- ice->state.dirty |= IRIS_DIRTY_CS |
- IRIS_DIRTY_BINDINGS_CS |
- IRIS_DIRTY_CONSTANTS_CS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
+ IRIS_STAGE_DIRTY_BINDINGS_CS |
+ IRIS_STAGE_DIRTY_CONSTANTS_CS;
shs->sysvals_need_upload = true;
}
}
void
iris_update_compiled_compute_shader(struct iris_context *ice)
{
- if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
+ if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
iris_update_compiled_cs(ice);
- if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
+ if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
}
if (ice->shaders.uncompiled[stage] == ish) {
ice->shaders.uncompiled[stage] = NULL;
- ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
}
if (ish->const_data) {
struct iris_uncompiled_shader *ish,
gl_shader_stage stage)
{
- uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
+ uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
const uint64_t nos = ish ? ish->nos : 0;
const struct shader_info *old_info = iris_get_shader_info(ice, stage);
if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
(new_info ? util_last_bit(new_info->textures_used) : 0)) {
- ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
}
ice->shaders.uncompiled[stage] = ish;
- ice->state.dirty |= dirty_bit;
+ ice->state.stage_dirty |= stage_dirty_bit;
- /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
+ /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they change
* (or that they no longer need to do so).
*/
for (int i = 0; i < IRIS_NOS_COUNT; i++) {
if (nos & (1 << i))
- ice->state.dirty_for_nos[i] |= dirty_bit;
+ ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
else
- ice->state.dirty_for_nos[i] &= ~dirty_bit;
+ ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
}
}
// have to be done dynamically at draw time, which is a pain
ice->state.statistics_counters_enabled = enable;
ice->state.dirty |= IRIS_DIRTY_CLIP |
- IRIS_DIRTY_GS |
IRIS_DIRTY_RASTER |
IRIS_DIRTY_STREAMOUT |
- IRIS_DIRTY_TCS |
- IRIS_DIRTY_TES |
- IRIS_DIRTY_VS |
IRIS_DIRTY_WM;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_VS;
}
static void
struct iris_shader_state *shs = &ice->state.shaders[stage];
const struct shader_info *info = iris_get_shader_info(ice, stage);
- uint64_t dirty = (IRIS_DIRTY_BINDINGS_VS << stage) |
- (consider_framebuffer ? IRIS_DIRTY_BINDINGS_FS : 0);
+ uint64_t stage_dirty = (IRIS_STAGE_DIRTY_BINDINGS_VS << stage) |
+ (consider_framebuffer ? IRIS_STAGE_DIRTY_BINDINGS_FS : 0);
- if (ice->state.dirty & dirty) {
+ if (ice->state.stage_dirty & stage_dirty) {
resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
consider_framebuffer);
resolve_image_views(ice, batch, shs, info, draw_aux_buffer_disabled,
}
}
- if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
+ if ((ice->state.dirty & IRIS_DIRTY_BLEND_STATE) ||
+ (ice->state.stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_FS)) {
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
struct iris_surface *surf = (void *) cso_fb->cbufs[i];
if (!surf)
if (ice->state.draw_aux_usage[i] != aux_usage) {
ice->state.draw_aux_usage[i] = aux_usage;
/* XXX: Need to track which bindings to make dirty */
- ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
}
iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
}
bool may_have_resolved_color =
- ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE);
+ (ice->state.dirty & IRIS_DIRTY_BLEND_STATE) ||
+ (ice->state.stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_FS);
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
struct iris_surface *surf = (void *) cso_fb->cbufs[i];
if (res->aux.state[level][start_layer + a] != aux_state) {
res->aux.state[level][start_layer + a] = aux_state;
/* XXX: Need to track which bindings to make dirty */
- ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
}
}
}
iris_dirty_for_history(struct iris_context *ice,
struct iris_resource *res)
{
- uint64_t dirty = 0ull;
+ uint64_t stage_dirty = 0ull;
if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
- dirty |= ((uint64_t)res->bind_stages) << IRIS_SHIFT_FOR_DIRTY_CONSTANTS;
+ stage_dirty |= ((uint64_t)res->bind_stages)
+ << IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS;
}
- ice->state.dirty |= dirty;
+ ice->state.stage_dirty |= stage_dirty;
}
/**
ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
- ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
+ ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];
if (GEN_GEN == 8)
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
ice->state.cso_zsa = new_cso;
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
- ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
+ ice->state.stage_dirty |=
+ ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
if (GEN_GEN == 8)
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
ice->state.dirty |= IRIS_DIRTY_SBE;
if (cso_changed(conservative_rasterization))
- ice->state.dirty |= IRIS_DIRTY_FS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
}
ice->state.cso_rast = new_cso;
ice->state.dirty |= IRIS_DIRTY_RASTER;
ice->state.dirty |= IRIS_DIRTY_CLIP;
- ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
+ ice->state.stage_dirty |=
+ ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
}
/**
}
if (dirty)
- ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
}
/**
}
}
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
ice->state.dirty |=
stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
: IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
/* Broadwell also needs brw_image_params re-uploaded */
if (GEN_GEN < 9) {
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
shs->sysvals_need_upload = true;
}
}
}
}
- ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
+ ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
ice->state.dirty |=
stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
: IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
shs->sysvals_need_upload = true;
}
memcpy(&ice->state.clip_planes, state, sizeof(*state));
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS |
- IRIS_DIRTY_CONSTANTS_TES;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
+ IRIS_STAGE_DIRTY_CONSTANTS_GS |
+ IRIS_STAGE_DIRTY_CONSTANTS_TES;
shs->sysvals_need_upload = true;
gshs->sysvals_need_upload = true;
tshs->sysvals_need_upload = true;
/* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
- ice->state.dirty |= IRIS_DIRTY_FS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
}
if (cso->nr_cbufs != state->nr_cbufs) {
iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
/* Render target change */
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
- ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
+ ice->state.stage_dirty |=
+ ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
if (GEN_GEN == 8)
ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
pipe_resource_reference(&cbuf->buffer, NULL);
}
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
}
static void
}
}
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
}
static void
struct iris_genx_state *genx = ice->state.genx;
const uint64_t clean = ~ice->state.dirty;
+ const uint64_t stage_clean = ~ice->state.stage_dirty;
if (clean & IRIS_DIRTY_CC_VIEWPORT) {
iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
+ if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
continue;
struct iris_shader_state *shs = &ice->state.shaders[stage];
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
+ if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
/* Re-pin any buffers referred to by the binding table. */
iris_populate_binding_table(ice, batch, stage, true);
}
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (clean & (IRIS_DIRTY_VS << stage)) {
+ if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
if (shader) {
struct iris_batch *batch,
const struct pipe_grid_info *grid)
{
- const uint64_t clean = ~ice->state.dirty;
+ const uint64_t stage_clean = ~ice->state.stage_dirty;
const int stage = MESA_SHADER_COMPUTE;
struct iris_shader_state *shs = &ice->state.shaders[stage];
- if (clean & IRIS_DIRTY_BINDINGS_CS) {
+ if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
/* Re-pin any buffers referred to by the binding table. */
iris_populate_binding_table(ice, batch, stage, true);
}
if (sampler_res)
iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);
- if ((clean & IRIS_DIRTY_SAMPLER_STATES_CS) &&
- (clean & IRIS_DIRTY_BINDINGS_CS) &&
- (clean & IRIS_DIRTY_CONSTANTS_CS) &&
- (clean & IRIS_DIRTY_CS)) {
+ if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
+ (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
+ (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
+ (stage_clean & IRIS_STAGE_DIRTY_CS)) {
iris_use_optional_res(batch, ice->state.last_res.cs_desc, false);
}
- if (clean & IRIS_DIRTY_CS) {
+ if (stage_clean & IRIS_STAGE_DIRTY_CS) {
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
if (shader) {
const struct pipe_draw_info *draw)
{
const uint64_t dirty = ice->state.dirty;
+ const uint64_t stage_dirty = ice->state.stage_dirty;
- if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
+ if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
+ !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
return;
struct iris_genx_state *genx = ice->state.genx;
* any stage has a dirty binding table.
*/
const bool emit_const_wa = GEN_GEN >= 11 &&
- (dirty & IRIS_ALL_DIRTY_BINDINGS) != 0;
+ ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
+ (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));
#if GEN_GEN >= 12
uint32_t nobuffer_stages = 0;
#endif
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)) &&
+ if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
!emit_const_wa)
continue;
* in order to commit constants. TODO: Investigate "Disable Gather
* at Set Shader" to go back to legacy mode...
*/
- if (dirty & ((IRIS_DIRTY_BINDINGS_VS |
- (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) {
+ if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
+ (GEN_GEN == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
+ << stage)) {
iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
ptr._3DCommandSubOpcode = 38 + stage;
ptr.PointertoVSBindingTable = binder->bt_offset[stage];
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
+ if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
iris_populate_binding_table(ice, batch, stage, false);
}
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
+ if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
!ice->shaders.prog[stage])
continue;
}
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(dirty & (IRIS_DIRTY_VS << stage)))
+ if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
continue;
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
struct iris_batch *batch,
const struct pipe_grid_info *grid)
{
- const uint64_t dirty = ice->state.dirty;
+ const uint64_t stage_dirty = ice->state.stage_dirty;
struct iris_screen *screen = batch->screen;
const struct gen_device_info *devinfo = &screen->devinfo;
struct iris_binder *binder = &ice->state.binder;
*/
iris_use_pinned_bo(batch, ice->state.binder.bo, false);
- if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->sysvals_need_upload)
+ if ((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
+ shs->sysvals_need_upload)
upload_sysvals(ice, MESA_SHADER_COMPUTE);
- if (dirty & IRIS_DIRTY_BINDINGS_CS)
+ if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
- if (dirty & IRIS_DIRTY_SAMPLER_STATES_CS)
+ if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
iris_use_optional_res(batch, shs->sampler_table.res, false);
genX(invalidate_aux_map_state)(batch);
#endif
- if (dirty & IRIS_DIRTY_CS) {
+ if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
/* The MEDIA_VFE_STATE documentation for Gen8+ says:
*
* "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
}
/* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
- if (dirty & IRIS_DIRTY_CS) {
+ if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
uint32_t curbe_data_offset = 0;
assert(cs_prog_data->push.cross_thread.dwords == 0 &&
cs_prog_data->push.per_thread.dwords == 1 &&
}
}
- if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
- IRIS_DIRTY_BINDINGS_CS |
- IRIS_DIRTY_CONSTANTS_CS |
- IRIS_DIRTY_CS)) {
+ if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
+ IRIS_STAGE_DIRTY_BINDINGS_CS |
+ IRIS_STAGE_DIRTY_CONSTANTS_CS |
+ IRIS_STAGE_DIRTY_CS)) {
uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
if (res->bo == iris_resource_bo(cbuf->buffer)) {
pipe_resource_reference(&surf_state->res, NULL);
- ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
}
}
}
if (update_surface_state_addrs(ice->state.surface_uploader,
&isv->surface_state, bo)) {
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
}
}
}
if (update_surface_state_addrs(ice->state.surface_uploader,
&iv->surface_state, bo)) {
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
+ ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
}
}
}
{
struct iris_context *ice = (struct iris_context *) ctx;
- if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable))
+ if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
+ }
- if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable))
+ if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
+ ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
+ }
}
void
screen->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;
+ ice->state.stage_dirty = ~0ull;
ice->state.statistics_counters_enabled = true;