iris_finish_batch(batch);
- if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
+ if (unlikely(INTEL_DEBUG &
+ (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL))) {
int bytes_for_commands = iris_batch_bytes_used(batch);
int second_bytes = 0;
if (batch->bo != batch->exec_bos[0]) {
100.0f * bytes_for_commands / BATCH_SZ,
batch->exec_count,
(float) batch->aperture_space / (1024 * 1024));
- dump_fence_list(batch);
- dump_validation_list(batch);
- }
- if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- decode_batch(batch);
+ if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
+ dump_fence_list(batch);
+ dump_validation_list(batch);
+ }
+
+ if (INTEL_DEBUG & DEBUG_BATCH) {
+ decode_batch(batch);
+ }
}
int ret = submit_batch(batch);
*
* TODO: Remove this hack!
*/
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ const char *reason =
+ "workaround: WaSamplerCacheFlushBetweenRedescribedSurfaceReads";
+
+ iris_emit_pipe_control_flush(batch, reason, PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch, reason,
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
/**
info->dst.box.depth, dst_aux_usage);
iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *)
- info->dst.resource);
+ info->dst.resource,
+ "cache history: post-blit");
}
static void
blorp_batch_finish(&blorp_batch);
iris_flush_and_dirty_for_history(ice, batch,
- (struct iris_resource *) dst);
+ (struct iris_resource *) dst,
+ "cache history: post copy_region");
} else {
// XXX: what about one surface being a buffer and not the other?
}
if (need_invalidate) {
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE |
- PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "workaround: VF cache 32-bit key [blorp]",
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_CS_STALL);
}
}
* be set in this packet."
*/
iris_emit_pipe_control_flush(batch,
+ "workaround: RT BTI change [blorp]",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif
* and again afterwards to ensure that the resolve is complete before we
* do any more regular drawing.
*/
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch,
+ "fast clear: pre-flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
/* If we reach this point, we need to fast clear to change the state to
* ISL_AUX_STATE_CLEAR, or to update the fast clear color (or both).
box->x, box->y, box->x + box->width,
box->y + box->height);
blorp_batch_finish(&blorp_batch);
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch,
+ "fast clear: post flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
iris_resource_set_aux_state(ice, res, level, box->z,
box->depth, ISL_AUX_STATE_CLEAR);
color, color_write_disable);
blorp_batch_finish(&blorp_batch);
- iris_flush_and_dirty_for_history(ice, batch, res);
+ iris_flush_and_dirty_for_history(ice, batch, res,
+ "cache history: post color clear");
iris_resource_finish_render(ice, res, level,
box->z, box->depth, aux_usage);
if (z_res && clear_depth &&
can_fast_clear_depth(ice, z_res, level, box, depth)) {
fast_clear_depth(ice, z_res, level, box, depth);
- iris_flush_and_dirty_for_history(ice, batch, res);
+ iris_flush_and_dirty_for_history(ice, batch, res,
+ "cache history: post fast Z clear");
clear_depth = false;
z_res = false;
}
clear_stencil && stencil_res ? 0xff : 0, stencil);
blorp_batch_finish(&blorp_batch);
- iris_flush_and_dirty_for_history(ice, batch, res);
+ iris_flush_and_dirty_for_history(ice, batch, res,
+ "cache history: post slow ZS clear");
if (z_res) {
iris_resource_finish_depth(ice, z_res, level,
struct iris_bo *dst_bo, uint32_t dst_offset,
struct iris_bo *src_bo, uint32_t src_offset,
unsigned bytes);
- void (*emit_raw_pipe_control)(struct iris_batch *batch, uint32_t flags,
+ void (*emit_raw_pipe_control)(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
struct iris_bo *bo, uint32_t offset,
uint64_t imm);
/* iris_pipe_control.c */
void iris_emit_pipe_control_flush(struct iris_batch *batch,
- uint32_t flags);
-void iris_emit_pipe_control_write(struct iris_batch *batch, uint32_t flags,
+ const char *reason, uint32_t flags);
+void iris_emit_pipe_control_write(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
struct iris_bo *bo, uint32_t offset,
uint64_t imm);
void iris_emit_end_of_pipe_sync(struct iris_batch *batch,
- uint32_t flags);
+ const char *reason, uint32_t flags);
void iris_init_flush_functions(struct pipe_context *ctx);
* given generation.
*/
void
-iris_emit_pipe_control_flush(struct iris_batch *batch, uint32_t flags)
+iris_emit_pipe_control_flush(struct iris_batch *batch,
+ const char *reason,
+ uint32_t flags)
{
if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
(flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
* with any write cache flush, so this shouldn't be a concern. In order
* to ensure a full stall, we do an end-of-pipe sync.
*/
- iris_emit_end_of_pipe_sync(batch, flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
+ iris_emit_end_of_pipe_sync(batch, reason,
+ flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
}
- batch->vtbl->emit_raw_pipe_control(batch, flags, NULL, 0, 0);
+ batch->vtbl->emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
/**
* - PIPE_CONTROL_WRITE_DEPTH_COUNT
*/
void
-iris_emit_pipe_control_write(struct iris_batch *batch, uint32_t flags,
+iris_emit_pipe_control_write(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
struct iris_bo *bo, uint32_t offset,
uint64_t imm)
{
- batch->vtbl->emit_raw_pipe_control(batch, flags, bo, offset, imm);
+ batch->vtbl->emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
/*
* Data" in the PIPE_CONTROL command.
*/
void
-iris_emit_end_of_pipe_sync(struct iris_batch *batch, uint32_t flags)
+iris_emit_end_of_pipe_sync(struct iris_batch *batch,
+ const char *reason, uint32_t flags)
{
/* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
*
* Data, Required Write Cache Flush bits set)
* - Workload-2 (Can use the data produce or output by Workload-1)
*/
- iris_emit_pipe_control_write(batch, flags | PIPE_CONTROL_CS_STALL |
+ iris_emit_pipe_control_write(batch, reason,
+ flags | PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_WRITE_IMMEDIATE,
batch->screen->workaround_bo, 0, 0);
}
render_batch->cache.render->entries ||
render_batch->cache.depth->entries) {
iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (1/2)",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_CS_STALL);
iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (2/2)",
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
if (compute_batch->contains_draw) {
iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (1/2)",
PIPE_CONTROL_CS_STALL);
iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (2/2)",
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
}
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
if (ice->batches[i].contains_draw ||
- ice->batches[i].cache.render->entries)
- iris_emit_pipe_control_flush(&ice->batches[i], bits);
+ ice->batches[i].cache.render->entries) {
+ iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
+ bits);
+ }
}
}
} else {
/* Order available *after* the query results. */
flags |= PIPE_CONTROL_FLUSH_ENABLE;
- iris_emit_pipe_control_write(batch, flags, bo, offset, true);
+ iris_emit_pipe_control_write(batch, "query: mark available",
+ flags, bo, offset, true);
}
}
devinfo->gen == 9 && devinfo->gt == 4 ? PIPE_CONTROL_CS_STALL : 0;
struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
- iris_emit_pipe_control_write(batch, flags | optional_cs_stall,
+ iris_emit_pipe_control_write(batch, "query: pipelined snapshot write",
+ flags | optional_cs_stall,
bo, offset, 0ull);
}
if (!iris_is_query_pipelined(q)) {
iris_emit_pipe_control_flush(batch,
+ "query: non-pipelined snapshot write",
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
q->stalled = true;
* bit set prior to programming a PIPE_CONTROL with Write PS Depth
* Count sync operation."
*/
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "workaround: depth stall before writing "
+ "PS_DEPTH_COUNT",
+ PIPE_CONTROL_DEPTH_STALL);
}
iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
PIPE_CONTROL_WRITE_DEPTH_COUNT |
uint32_t offset = q->query_state_ref.offset;
iris_emit_pipe_control_flush(batch,
+ "query: write SO overflow snapshots",
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
for (uint32_t i = 0; i < count; i++) {
* and use the result.
*/
// XXX: Why? i965 doesn't do this.
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "query: unknown QBO flushing hack",
+ PIPE_CONTROL_CS_STALL);
return;
}
ice->state.predicate = IRIS_PREDICATE_STATE_USE_BIT;
/* Ensure the memory is coherent for MI_LOAD_REGISTER_* commands. */
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_FLUSH_ENABLE);
+ iris_emit_pipe_control_flush(batch,
+ "conditional rendering: set predicate",
+ PIPE_CONTROL_FLUSH_ENABLE);
q->stalled = true;
switch (q->type) {
iris_flush_depth_and_render_caches(struct iris_batch *batch)
{
iris_emit_pipe_control_flush(batch,
+ "cache tracker: render-to-texture",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_CS_STALL);
iris_emit_pipe_control_flush(batch,
+ "cache tracker: render-to-texture",
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_CONST_CACHE_INVALIDATE);
* and again afterwards to ensure that the resolve is complete before we
* do any more regular drawing.
*/
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
blorp_batch_finish(&blorp_batch);
/* See comment above */
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
}
static void
* another for depth stall.
*/
iris_emit_pipe_control_flush(batch,
+ "hiz op: pre-flushes (1/2)",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_CS_STALL);
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);
+ iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
+ PIPE_CONTROL_DEPTH_STALL);
assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);
* TODO: As the spec says, this could be conditional.
*/
iris_emit_pipe_control_flush(batch,
+ "hiz op: post flush",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_STALL);
}
xfer->resource, xfer->level, box);
/* Ensure writes to the staging BO land before we map it below. */
iris_emit_pipe_control_flush(map->batch,
+ "transfer read: flush before mapping",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_CS_STALL);
}
if (ice->batches[i].contains_draw ||
ice->batches[i].cache.render->entries) {
iris_batch_maybe_flush(&ice->batches[i], 24);
- iris_flush_and_dirty_for_history(ice, &ice->batches[i], res);
+ iris_flush_and_dirty_for_history(ice, &ice->batches[i], res,
+ "cache history: transfer flush");
}
}
void
iris_flush_and_dirty_for_history(struct iris_context *ice,
struct iris_batch *batch,
- struct iris_resource *res)
+ struct iris_resource *res,
+ const char *reason)
{
if (res->base.target != PIPE_BUFFER)
return;
if (batch->name != IRIS_BATCH_COMPUTE)
flush |= PIPE_CONTROL_RENDER_TARGET_FLUSH;
- iris_emit_pipe_control_flush(batch, flush);
+ iris_emit_pipe_control_flush(batch, reason, flush);
}
bool
void iris_flush_and_dirty_for_history(struct iris_context *ice,
struct iris_batch *batch,
- struct iris_resource *res);
+ struct iris_resource *res,
+ const char *reason);
unsigned iris_get_num_logical_layers(const struct iris_resource *res,
unsigned level);
* rendering. It's a bit of a big hammer but it appears to work.
*/
iris_emit_end_of_pipe_sync(batch,
+ "change STATE_BASE_ADDRESS",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DATA_CACHE_FLUSH);
* MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
*/
iris_emit_pipe_control_flush(batch,
+ "workaround: PIPELINE_SELECT flushes (1/2)",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DATA_CACHE_FLUSH |
PIPE_CONTROL_CS_STALL);
iris_emit_pipe_control_flush(batch,
+ "workaround: PIPELINE_SELECT flushes (2/2)",
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_CONST_CACHE_INVALIDATE |
PIPE_CONTROL_STATE_CACHE_INVALIDATE |
uint32_t reg_val;
/* A fixed function pipe flush is required before modifying this field */
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
+ : "disable preemption",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
/* enable object level preemption */
iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
*/
// XXX: does this need to happen at 3DSTATE_BTP_PS time?
iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
+ "workaround: RT BTI change [draw]",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif
iris_dirty_for_history(ice, res);
}
}
- iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER], flush);
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
+ "make streamout results visible", flush);
}
}
}
}
- if (flush_flags)
- iris_emit_pipe_control_flush(batch, flush_flags);
+ if (flush_flags) {
+ iris_emit_pipe_control_flush(batch,
+ "workaround: VF cache 32-bit key [VB]",
+ flush_flags);
+ }
const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
/* The VF cache key only uses 32 bits, see vertex buffer comment above */
uint16_t high_bits = bo->gtt_offset >> 32ull;
if (high_bits != ice->state.last_index_bo_high_bits) {
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE |
- PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "workaround: VF cache 32-bit key [IB]",
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_CS_STALL);
ice->state.last_index_bo_high_bits = high_bits;
}
}
unsigned draw_count_offset =
draw->indirect->indirect_draw_count_offset;
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_FLUSH_ENABLE);
+ iris_emit_pipe_control_flush(batch,
+ "ensure indirect draw buffer is flushed",
+ PIPE_CONTROL_FLUSH_ENABLE);
if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
static const uint32_t math[] = {
(void *) draw->count_from_stream_output;
/* XXX: Replace with actual cache tracking */
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "draw count from stream output stall",
+ PIPE_CONTROL_CS_STALL);
iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
lrm.RegisterAddress = CS_GPR(0);
* these scoreboard related states, a MEDIA_STATE_FLUSH is
* sufficient."
*/
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch,
+ "workaround: stall before MEDIA_VFE_STATE",
+ PIPE_CONTROL_CS_STALL);
iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
if (prog_data->total_scratch) {
* iris_pipe_control.c instead, which may split the pipe control further.
*/
static void
-iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
- struct iris_bo *bo, uint32_t offset, uint64_t imm)
+iris_emit_raw_pipe_control(struct iris_batch *batch,
+ const char *reason,
+ uint32_t flags,
+ struct iris_bo *bo,
+ uint32_t offset,
+ uint64_t imm)
{
UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
* needs to be sent prior to the PIPE_CONTROL with VF Cache
* Invalidation Enable set to a 1."
*/
- iris_emit_raw_pipe_control(batch, 0, NULL, 0, 0);
+ iris_emit_raw_pipe_control(batch,
+ "workaround: recursive VF cache invalidate",
+ 0, NULL, 0, 0);
}
if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
*
* The same text exists a few rows below for Post Sync Op.
*/
- iris_emit_raw_pipe_control(batch, PIPE_CONTROL_CS_STALL, bo, offset, imm);
+ iris_emit_raw_pipe_control(batch,
+ "workaround: CS stall before gpgpu post-sync",
+ PIPE_CONTROL_CS_STALL, bo, offset, imm);
}
if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
* another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
* = 0 and Pipe Control Flush Enable (bit 7) = 1"
*/
- iris_emit_raw_pipe_control(batch, PIPE_CONTROL_FLUSH_ENABLE, bo,
- offset, imm);
+ iris_emit_raw_pipe_control(batch,
+ "workaround: PC flush before RT flush",
+ PIPE_CONTROL_FLUSH_ENABLE, bo, offset, imm);
}
/* "Flush Types" workarounds ---------------------------------------------
/* Emit --------------------------------------------------------------- */
+ if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
+ fprintf(stderr,
+ " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
+ (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
+ (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
+ (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
+ (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
+ (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
+ (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
+ (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
+ (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
+ (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
+ (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
+ (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
+ (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
+ (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
+ (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
+ (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
+ (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
+ "SnapRes" : "",
+ (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
+ "ISPDis" : "",
+ (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
+ (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
+ (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
+ imm, reason);
+ }
+
iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
pc.LRIPostSyncOperation = NoLRIOperation;
pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
{ "soft64", DEBUG_SOFT64 },
{ "tcs8", DEBUG_TCS_EIGHT_PATCH },
{ "bt", DEBUG_BT },
+ { "pc", DEBUG_PIPE_CONTROL },
{ NULL, 0 }
};
#define DEBUG_SOFT64 (1ull << 42)
#define DEBUG_TCS_EIGHT_PATCH (1ull << 43)
#define DEBUG_BT (1ull << 44)
+#define DEBUG_PIPE_CONTROL (1ull << 45)
/* These flags are not compatible with the disk shader cache */
#define DEBUG_DISK_CACHE_DISABLE_MASK DEBUG_SHADER_TIME
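
Illustration (not part of the patch above): a minimal, self-contained sketch of the pattern this change introduces, i.e. threading a human-readable "reason" string through every pipe-control helper and, when a debug flag is set, printing that reason next to the decoded PIPE_CONTROL bits just before the packet is emitted. The flag values, helper name, and output format here are simplified stand-ins, not the real iris/Mesa definitions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the INTEL_DEBUG bitfield; pretend "pc" is set. */
#define DEBUG_PIPE_CONTROL (1ull << 0)
static uint64_t intel_debug = DEBUG_PIPE_CONTROL;

/* A few representative PIPE_CONTROL bits (values are arbitrary here). */
#define PIPE_CONTROL_CS_STALL        (1u << 0)
#define PIPE_CONTROL_DEPTH_STALL     (1u << 1)
#define PIPE_CONTROL_WRITE_IMMEDIATE (1u << 2)

static void
emit_raw_pipe_control(const char *reason, uint32_t flags, uint64_t imm)
{
   /* The annotation costs one string argument at each call site; it is
    * consumed here, right before the hardware packet would be built. */
   if (intel_debug & DEBUG_PIPE_CONTROL) {
      fprintf(stderr, "  PC [%s%s%s%" PRIx64 "]: %s\n",
              (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
              (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
              (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
              imm, reason);
   }
   /* ...actual PIPE_CONTROL emission would follow here... */
}

int
main(void)
{
   /* Call sites say *why* they flush, mirroring the reason strings added
    * throughout the patch. */
   emit_raw_pipe_control("workaround: example depth stall",
                         PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_CS_STALL, 0);
   emit_raw_pipe_control("query: mark available",
                         PIPE_CONTROL_WRITE_IMMEDIATE, 1);
   return 0;
}

In the driver itself, the same output path is gated by the new "pc" entry added to debug_control above, so setting INTEL_DEBUG=pc (or combining it with the batch/submit options) should print one such line per PIPE_CONTROL, pairing the decoded flag bits with the reason supplied at the call site.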