* synchronize when the CP is running far ahead)
*/
struct fd_bo *blit_mem;
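+ /* incrementing seqno for timestamped CP_EVENT_WRITEs (see fd6_event_write()): */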
+ uint32_t seqno;
struct u_upload_mgr *border_color_uploader;
struct pipe_resource *border_color_buf;
for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
if (emit.streamout_mask & (1 << i)) {
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, FLUSH_SO_0 + i);
+ fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
}
}
}
OUT_RING(ring, uc.ui[2]);
OUT_RING(ring, uc.ui[3]);
- fd6_emit_blit(ctx, ring);
+ fd6_emit_blit(ctx->batch, ring);
}
}
OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
OUT_RING(ring, clear);
- fd6_emit_blit(ctx, ring);
+ fd6_emit_blit(ctx->batch, ring);
#if 0
if (pfb->zsbuf && (buffers & PIPE_CLEAR_DEPTH)) {
}
static inline void
-fd6_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
+fd6_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
+ enum vgt_event_type evt, bool timestamp)
{
fd_reset_wfi(batch);
-#if 0
- OUT_PKT4(ring, REG_A6XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
- OUT_RING(ring, 0x00000000); /* UCHE_CACHE_INVALIDATE_MIN_LO */
- OUT_RING(ring, 0x00000000); /* UCHE_CACHE_INVALIDATE_MIN_HI */
- OUT_RING(ring, 0x00000000); /* UCHE_CACHE_INVALIDATE_MAX_LO */
- OUT_RING(ring, 0x00000000); /* UCHE_CACHE_INVALIDATE_MAX_HI */
- OUT_RING(ring, 0x00000012); /* UCHE_CACHE_INVALIDATE */
- fd_wfi(batch, ring);
-#else
- DBG("fd6_cache_flush stub");
-#endif
+
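+ /* a timestamped event needs three extra dwords: ADDR_LO/HI plus the seqno written at that address: */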
+ OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
+ if (timestamp) {
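+ /* the seqno lands in blit_mem, used to synchronize when the CP is running far ahead: */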
+ struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
+ OUT_RELOCW(ring, fd6_ctx->blit_mem, 0, 0, 0); /* ADDR_LO/HI */
+ OUT_RING(ring, ++fd6_ctx->seqno);
+ }
+}
+
+static inline void
+fd6_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
+{
+ fd6_event_write(batch, ring, 0x31, false); /* 0x31: vertex cache invalidate? */
}
static inline void
-fd6_emit_blit(struct fd_context *ctx, struct fd_ringbuffer *ring)
+fd6_emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
emit_marker6(ring, 7);
-
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(BLIT));
-
+ fd6_event_write(batch, ring, BLIT, false);
emit_marker6(ring, 7);
}
fd6_emit_lrz_flush(ring);
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, 0x31); /* vertex cache invalidate? */
+ fd6_cache_flush(batch, ring);
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
OUT_RING(ring, base);
- fd6_emit_blit(batch->ctx, ring);
+ fd6_emit_blit(batch, ring);
}
static void
fd6_emit_lrz_flush(ring);
- fd6_cache_flush(batch, ring);
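+ /* CACHE_FLUSH_TS is timestamped, so this also lands a fresh seqno in blit_mem: */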
+ fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);
}
static void
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
-
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, 0x31); /* vertex cache invalidate? */
+ fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
+ fd6_cache_flush(batch, ring);
#if 0
OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
- struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
struct fd_ringbuffer *ring = batch->gmem;
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
fd6_emit_lrz_flush(ring);
- OUT_PKT7(ring, CP_EVENT_WRITE, 4);
- OUT_RING(ring, UNK_1D);
- OUT_RELOCW(ring, fd6_ctx->blit_mem, 0, 0, 0); /* ADDR_LO/HI */
- OUT_RING(ring, 0x00000000);
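+ /* note: the helper writes the incrementing seqno where the open-coded version wrote zero */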
+ fd6_event_write(batch, ring, UNK_1D, true);
}
void
#include "freedreno_resource.h"
#include "fd6_context.h"
+#include "fd6_emit.h"
#include "fd6_format.h"
#include "fd6_query.h"
OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO, 2);
OUT_RELOCW(ring, query_sample(aq, start));
- OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, ZPASS_DONE);
- fd_reset_wfi(batch);
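+ /* fd6_event_write() already does the fd_reset_wfi() for us: */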
+ fd6_event_write(batch, ring, ZPASS_DONE, false);
fd6_context(batch->ctx)->samples_passed_queries++;
}