struct fd_ringbuffer *ring;
uint32_t clear = util_pack_z(PIPE_FORMAT_Z16_UNORM, depth);
- // TODO mid-frame clears (ie. app doing crazy stuff)?? Maybe worth
- // splitting both clear and lrz clear out into their own rb's. And
- // just throw away any draws prior to clear. (Anything not fullscreen
- // clear, just fallback to generic path that treats it as a normal
- // draw
-
- if (!batch->lrz_clear) {
- batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
- }
-
- ring = batch->lrz_clear;
+ ring = fd_batch_get_prologue(batch);
OUT_WFI5(ring);
fd5_emit_restore(batch, ring);
- if (batch->lrz_clear)
- fd5_emit_ib(ring, batch->lrz_clear);
+ if (batch->prologue)
+ fd5_emit_ib(ring, batch->prologue);
fd5_emit_lrz_flush(ring);
fd5_emit_lrz_flush(ring);
+ if (batch->prologue)
+ fd5_emit_ib(ring, batch->prologue);
+
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
struct fd_ringbuffer *ring;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
- if (batch->lrz_clear) {
- fd_ringbuffer_del(batch->lrz_clear);
- }
-
- batch->lrz_clear = fd_submit_new_ringbuffer(batch->submit, 0x1000, 0);
- ring = batch->lrz_clear;
+ ring = fd_batch_get_prologue(batch);
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
fd6_emit_lrz_flush(ring);
- if (batch->lrz_clear) {
- fd_log(batch, "START LRZ CLEAR");
- fd6_emit_ib(ring, batch->lrz_clear);
- fd_log(batch, "END LRZ CLEAR");
+ if (batch->prologue) {
+ fd_log(batch, "START PROLOGUE");
+ fd6_emit_ib(ring, batch->prologue);
+ fd_log(batch, "END PROLOGUE");
}
fd6_cache_inv(batch, ring);
struct fd_ringbuffer *ring = batch->gmem;
fd6_emit_restore(batch, ring);
+ fd6_emit_lrz_flush(ring);
+
+ if (batch->prologue) {
+ fd_log(batch, "START PROLOGUE");
+ fd6_emit_ib(ring, batch->prologue);
+ fd_log(batch, "END PROLOGUE");
+ }
if (pfb->width > 0 && pfb->height > 0)
set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
emit_sysmem_clears(batch, ring);
- fd6_emit_lrz_flush(ring);
-
- if (batch->lrz_clear)
- fd6_emit_ib(ring, batch->lrz_clear);
-
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
debug_assert(!batch->gmem);
}
- if (batch->lrz_clear) {
- fd_ringbuffer_del(batch->lrz_clear);
- batch->lrz_clear = NULL;
+ if (batch->prologue) {
+ fd_ringbuffer_del(batch->prologue);
+ batch->prologue = NULL;
}
if (batch->epilogue) {
fd_screen_unlock(batch->ctx->screen);
}
+/* Get per-batch prologue (allocated on first use) */
+struct fd_ringbuffer *
+fd_batch_get_prologue(struct fd_batch *batch)
+{
+ if (!batch->prologue)
+ batch->prologue = alloc_ring(batch, 0x1000, 0);
+ return batch->prologue;
+}
+
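For illustration, a minimal sketch of the producer side: a backend path that needs one-shot setup grabs the prologue ring and records into it. The function name below is a hypothetical placeholder, and OUT_WFI5() is only borrowed from the a5xx hunk above as an example packet:

static void
emit_one_time_setup(struct fd_batch *batch)
{
	/* Lazily allocates batch->prologue on first use: */
	struct fd_ringbuffer *ring = fd_batch_get_prologue(batch);

	/* Anything recorded here is replayed once, before the first tile
	 * (or before sysmem rendering), when the backend emits the
	 * prologue as an IB:
	 */
	OUT_WFI5(ring);
}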
/* NOTE: could drop the last ref to batch
*
* @sync: synchronize with flush_queue, ensures batch is *actually* flushed
/** tiling/gmem (IB0) cmdstream: */
struct fd_ringbuffer *gmem;
- /** epilogue cmdstream: */
+ /** prologue cmdstream (executed once before first tile): */
+ struct fd_ringbuffer *prologue;
+
+ /** epilogue cmdstream (executed after each tile): */
struct fd_ringbuffer *epilogue;
- // TODO maybe more generically split out clear and clear_binning rings?
- struct fd_ringbuffer *lrz_clear;
struct fd_ringbuffer *tile_setup;
struct fd_ringbuffer *tile_fini;
fd_reset_wfi(batch);
}
+/* Get per-tile epilogue */
static inline struct fd_ringbuffer *
fd_batch_get_epilogue(struct fd_batch *batch)
{
return batch->epilogue;
}
+struct fd_ringbuffer * fd_batch_get_prologue(struct fd_batch *batch);
#endif /* FREEDRENO_BATCH_H_ */
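And the consumer side, as a minimal sketch: the tile-init/sysmem-prep paths (see the a5xx/a6xx hunks above) replay the prologue as an IB before any rendering. The function name here is a hypothetical placeholder; fd6_emit_ib() is the helper used in the hunks above:

static void
emit_tile_init_sketch(struct fd_batch *batch)
{
	struct fd_ringbuffer *ring = batch->gmem;

	/* Replay the one-shot prologue (if anything was recorded into it)
	 * before the per-tile / sysmem rendering:
	 */
	if (batch->prologue)
		fd6_emit_ib(ring, batch->prologue);
}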