#define BLORP_USE_SOFTPIN
#include "blorp/blorp_genX_exec.h"
-#if GEN_GEN == 8
-#define MOCS_WB 0x78
-#else
-#define MOCS_WB (2 << 1)
-#endif
-
static uint32_t *
stream_state(struct iris_batch *batch,
struct u_upload_mgr *uploader,
u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);
struct iris_bo *bo = iris_resource_bo(res);
- iris_use_pinned_bo(batch, bo, false);
+ iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
iris_record_state_size(batch->state_sizes,
bo->gtt_offset + *out_offset, size);
struct iris_batch *batch = blorp_batch->driver_batch;
struct iris_bo *bo = addr.buffer;
- iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE);
+ iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
+ IRIS_DOMAIN_NONE);
/* Assume this is a general address, not relative to a base. */
return bo->gtt_offset + addr.offset;
bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
}
- iris_use_pinned_bo(batch, binder->bo, false);
+ iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);
- ice->vtbl.update_surface_base_address(batch, binder);
+ batch->screen->vtbl.update_surface_base_address(batch, binder);
}
static void *
*addr = (struct blorp_address) {
.buffer = bo,
.offset = offset,
- .mocs = MOCS_WB,
+ .mocs = iris_mocs(bo, &batch->screen->isl_dev),
};
return map;
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
const struct blorp_address *addrs,
+ UNUSED uint32_t *sizes,
unsigned num_vbs)
{
+#if GEN_GEN < 11
struct iris_context *ice = blorp_batch->blorp->driver_ctx;
struct iris_batch *batch = blorp_batch->driver_batch;
bool need_invalidate = false;
PIPE_CONTROL_VF_CACHE_INVALIDATE |
PIPE_CONTROL_CS_STALL);
}
+#endif
}
static struct blorp_address
-blorp_get_workaround_page(struct blorp_batch *blorp_batch)
+blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
struct iris_batch *batch = blorp_batch->driver_batch;
- return (struct blorp_address) { .buffer = batch->screen->workaround_bo };
+ /* The screen's workaround BO is now carried as a (bo, offset) pair
+ * rather than a bare BO, so forward both fields instead of implicitly
+ * assuming a zero offset.
+ */
+ return (struct blorp_address) {
+ .buffer = batch->screen->workaround_address.bo,
+ .offset = batch->screen->workaround_address.offset,
+ };
}
static void
*/
}
-static void
-blorp_emit_urb_config(struct blorp_batch *blorp_batch,
- unsigned vs_entry_size,
- UNUSED unsigned sf_entry_size)
+static const struct gen_l3_config *
+blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
- struct iris_context *ice = blorp_batch->blorp->driver_ctx;
struct iris_batch *batch = blorp_batch->driver_batch;
-
- unsigned size[4] = { vs_entry_size, 1, 1, 1 };
-
- /* If last VS URB size is good enough for what the BLORP operation needed,
- * then we can skip reconfiguration
- */
- if (ice->shaders.last_vs_entry_size >= vs_entry_size)
- return;
-
- genX(emit_urb_setup)(ice, batch, size, false, false);
- ice->state.dirty |= IRIS_DIRTY_URB;
+ /* BLORP core now emits its own URB configuration; the driver hook only
+ * needs to report which L3 configuration the 3D pipeline is using.
+ */
+ return batch->screen->l3_config_3d;
}
static void
PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif
- /* Flush the sampler and render caches. We definitely need to flush the
- * sampler cache so that we get updated contents from the render cache for
- * the glBlitFramebuffer() source. Also, we are sometimes warned in the
- * docs to flush the cache between reinterpretations of the same surface
- * data with different formats, which blorp does for stencil and depth
- * data.
+ /* Flush the render cache in cases where the same surface is reinterpreted
+ * with a different format, which blorp does for stencil and depth data
+ * among other things. Invalidation of sampler caches and flushing of any
+ * caches which had previously written the source surfaces should already
+ * have been handled by the caller.
*/
- if (params->src.enabled)
- iris_cache_flush_for_read(batch, params->src.addr.buffer);
if (params->dst.enabled) {
iris_cache_flush_for_render(batch, params->dst.addr.buffer,
params->dst.view.format,
params->dst.aux_usage);
}
- if (params->depth.enabled)
- iris_cache_flush_for_depth(batch, params->depth.addr.buffer);
- if (params->stencil.enabled)
- iris_cache_flush_for_depth(batch, params->stencil.addr.buffer);
iris_require_command_space(batch, 1400);
+#if GEN_GEN == 8
+ genX(update_pma_fix)(ice, batch, false);
+#endif
+
+ const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
+ if (ice->state.current_hash_scale != scale) {
+ genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
+ params->y1 - params->y0, scale);
+ }
+
+#if GEN_GEN >= 12
+ genX(invalidate_aux_map_state)(batch);
+#endif
+
+ iris_handle_always_flush_cache(batch);
+
blorp_exec(blorp_batch, params);
+ iris_handle_always_flush_cache(batch);
+
/* We've smashed all state compared to what the normal 3D pipeline
* rendering tracks for GL.
*/
IRIS_DIRTY_LINE_STIPPLE |
IRIS_ALL_DIRTY_FOR_COMPUTE |
IRIS_DIRTY_SCISSOR_RECT |
- IRIS_DIRTY_UNCOMPILED_VS |
- IRIS_DIRTY_UNCOMPILED_TCS |
- IRIS_DIRTY_UNCOMPILED_TES |
- IRIS_DIRTY_UNCOMPILED_GS |
- IRIS_DIRTY_UNCOMPILED_FS |
IRIS_DIRTY_VF |
- IRIS_DIRTY_URB |
- IRIS_DIRTY_SF_CL_VIEWPORT |
- IRIS_DIRTY_SAMPLER_STATES_VS |
- IRIS_DIRTY_SAMPLER_STATES_TCS |
- IRIS_DIRTY_SAMPLER_STATES_TES |
- IRIS_DIRTY_SAMPLER_STATES_GS);
+ IRIS_DIRTY_SF_CL_VIEWPORT);
+ uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
+ IRIS_STAGE_DIRTY_UNCOMPILED_VS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_TES |
+ IRIS_STAGE_DIRTY_UNCOMPILED_GS |
+ IRIS_STAGE_DIRTY_UNCOMPILED_FS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
+ IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);
+
+ if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
+ /* BLORP disabled tessellation, that's fine for the next draw */
+ skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
+ IRIS_STAGE_DIRTY_TES |
+ IRIS_STAGE_DIRTY_CONSTANTS_TCS |
+ IRIS_STAGE_DIRTY_CONSTANTS_TES |
+ IRIS_STAGE_DIRTY_BINDINGS_TCS |
+ IRIS_STAGE_DIRTY_BINDINGS_TES;
+ }
+
+ if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
+ /* BLORP disabled geometry shaders, that's fine for the next draw */
+ skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
+ IRIS_STAGE_DIRTY_CONSTANTS_GS |
+ IRIS_STAGE_DIRTY_BINDINGS_GS;
+ }
/* we can skip flagging IRIS_DIRTY_DEPTH_BUFFER, if
* BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;
ice->state.dirty |= ~skip_bits;
+ ice->state.stage_dirty |= ~skip_stage_bits;
- if (params->dst.enabled) {
- iris_render_cache_add_bo(batch, params->dst.addr.buffer,
- params->dst.view.format,
- params->dst.aux_usage);
- }
+ if (params->src.enabled)
+ iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_OTHER_READ);
+ if (params->dst.enabled)
+ iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_RENDER_WRITE);
if (params->depth.enabled)
- iris_depth_cache_add_bo(batch, params->depth.addr.buffer);
+ iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_DEPTH_WRITE);
if (params->stencil.enabled)
- iris_depth_cache_add_bo(batch, params->stencil.addr.buffer);
+ iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
+ IRIS_DOMAIN_DEPTH_WRITE);
}
void