u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);
struct iris_bo *bo = iris_resource_bo(res);
- iris_use_pinned_bo(batch, bo, false);
+ iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
iris_record_state_size(batch->state_sizes,
bo->gtt_offset + *out_offset, size);
struct iris_batch *batch = blorp_batch->driver_batch;
struct iris_bo *bo = addr.buffer;
- iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE);
+ iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
+ IRIS_DOMAIN_NONE);
/* Assume this is a general address, not relative to a base. */
return bo->gtt_offset + addr.offset;
bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
}
- iris_use_pinned_bo(batch, binder->bo, false);
+ iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);
- ice->vtbl.update_surface_base_address(batch, binder);
+ batch->screen->vtbl.update_surface_base_address(batch, binder);
}
static void *
}
static struct blorp_address
-blorp_get_workaround_page(struct blorp_batch *blorp_batch)
+blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
struct iris_batch *batch = blorp_batch->driver_batch;
- return (struct blorp_address) { .buffer = batch->screen->workaround_bo };
+ return (struct blorp_address) {
+ .buffer = batch->screen->workaround_address.bo,
+ .offset = batch->screen->workaround_address.offset,
+ };
}
static void
PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif
- /* Flush the sampler and render caches. We definitely need to flush the
- * sampler cache so that we get updated contents from the render cache for
- * the glBlitFramebuffer() source. Also, we are sometimes warned in the
- * docs to flush the cache between reinterpretations of the same surface
- * data with different formats, which blorp does for stencil and depth
- * data.
+ /* Flush the render cache in cases where the same surface is reinterpreted
+ * with a different format, which blorp does for stencil and depth data
+ * among other things. Invalidation of sampler caches and flushing of any
+ * caches which had previously written the source surfaces should already
+ * have been handled by the caller.
*/
- if (params->src.enabled)
- iris_cache_flush_for_read(batch, params->src.addr.buffer);
if (params->dst.enabled) {
iris_cache_flush_for_render(batch, params->dst.addr.buffer,
params->dst.view.format,
params->dst.aux_usage);
}
- if (params->depth.enabled)
- iris_cache_flush_for_depth(batch, params->depth.addr.buffer);
- if (params->stencil.enabled)
- iris_cache_flush_for_depth(batch, params->stencil.addr.buffer);
iris_require_command_space(batch, 1400);
IRIS_DIRTY_LINE_STIPPLE |
IRIS_ALL_DIRTY_FOR_COMPUTE |
IRIS_DIRTY_SCISSOR_RECT |
- IRIS_DIRTY_UNCOMPILED_VS |
- IRIS_DIRTY_UNCOMPILED_TCS |
- IRIS_DIRTY_UNCOMPILED_TES |
- IRIS_DIRTY_UNCOMPILED_GS |
- IRIS_DIRTY_UNCOMPILED_FS |
IRIS_DIRTY_VF |
- IRIS_DIRTY_SF_CL_VIEWPORT |
- IRIS_DIRTY_SAMPLER_STATES_VS |
- IRIS_DIRTY_SAMPLER_STATES_TCS |
- IRIS_DIRTY_SAMPLER_STATES_TES |
- IRIS_DIRTY_SAMPLER_STATES_GS);
+ IRIS_DIRTY_SF_CL_VIEWPORT);
+ uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
+ IRIS_STAGE_DIRTY_UNCOMPILED_VS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TES |
+ IRIS_STAGE_DIRTY_UNCOMPILED_GS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_FS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);
if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
/* BLORP disabled tessellation, that's fine for the next draw */
- skip_bits |= IRIS_DIRTY_TCS |
- IRIS_DIRTY_TES |
- IRIS_DIRTY_CONSTANTS_TCS |
- IRIS_DIRTY_CONSTANTS_TES |
- IRIS_DIRTY_BINDINGS_TCS |
- IRIS_DIRTY_BINDINGS_TES;
+ skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_CONSTANTS_TCS |
+ IRIS_STAGE_DIRTY_CONSTANTS_TES |
+ IRIS_STAGE_DIRTY_BINDINGS_TCS |
+ IRIS_STAGE_DIRTY_BINDINGS_TES;
}
if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
/* BLORP disabled geometry shaders, that's fine for the next draw */
- skip_bits |= IRIS_DIRTY_GS |
- IRIS_DIRTY_CONSTANTS_GS |
- IRIS_DIRTY_BINDINGS_GS;
+ skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_CONSTANTS_GS |
+ IRIS_STAGE_DIRTY_BINDINGS_GS;
}
/* we can skip flagging IRIS_DIRTY_DEPTH_BUFFER, if
skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;
ice->state.dirty |= ~skip_bits;
+ ice->state.stage_dirty |= ~skip_stage_bits;
- if (params->dst.enabled) {
- iris_render_cache_add_bo(batch, params->dst.addr.buffer,
- params->dst.view.format,
- params->dst.aux_usage);
- }
+ if (params->src.enabled)
+ iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_OTHER_READ);
+ if (params->dst.enabled)
+ iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_RENDER_WRITE);
if (params->depth.enabled)
- iris_depth_cache_add_bo(batch, params->depth.addr.buffer);
+ iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_DEPTH_WRITE);
if (params->stencil.enabled)
- iris_depth_cache_add_bo(batch, params->stencil.addr.buffer);
+ iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_DEPTH_WRITE);
}
void