/* Queue the job */
panfrost_scoreboard_queue_compute_job(batch, transfer);
- panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
}
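
The compute path previously reused the frame-oriented flush; with the new helper, the grid launch drains every pending batch and blocks until the kernel signals completion, so CPU read-backs that follow the dispatch are coherent. A caller-side sketch of the ordering this buys (read_back_ssbo() is a hypothetical helper, not driver API):

    /* Sketch: wait=true means the grid's writes are CPU-visible on return. */
    pipe->launch_grid(pipe, &info);  /* queues the job, then flushes all
                                      * batches with wait=true internally */
    read_back_ssbo(ctx, ssbo);       /* hypothetical; safe to read now    */
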
void
panfrost_flush(struct pipe_context *pipe,
               struct pipe_fence_handle **fence,
               unsigned flags)
{
struct panfrost_context *ctx = pan_context(pipe);
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
struct util_dynarray fences;
/* We must collect the fences before the flush is done, otherwise we'll
 * lose track of them.
 */
if (fence) {
util_dynarray_init(&fences, NULL);
- panfrost_batch_fence_reference(batch->out_sync);
- util_dynarray_append(&fences, struct panfrost_batch_fence *,
- batch->out_sync);
+ hash_table_foreach(ctx->batches, hentry) {
+ struct panfrost_batch *batch = hentry->data;
+
+ panfrost_batch_fence_reference(batch->out_sync);
+ util_dynarray_append(&fences,
+ struct panfrost_batch_fence *,
+ batch->out_sync);
+ }
}
- /* Submit the frame itself */
- panfrost_batch_submit(batch);
+ /* Submit all pending jobs */
+ panfrost_flush_all_batches(ctx, false);
if (fence) {
struct panfrost_fence *f = panfrost_fence_create(ctx, &fences);
pipe->screen->fence_reference(pipe->screen, fence, NULL);
*fence = (struct pipe_fence_handle *)f;

util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
panfrost_batch_fence_unreference(*fence);

util_dynarray_fini(&fences);
}
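
The collect-before-flush ordering is load-bearing: panfrost_flush_all_batches() frees each batch as it submits it, so batch->out_sync is no longer reachable through ctx->batches afterwards. A minimal sketch of the lifetime rule, assuming freeing a batch drops the batch's own reference to its fence:

    struct panfrost_batch_fence *f = batch->out_sync;
    panfrost_batch_fence_reference(f);      /* keep alive across the flush */
    panfrost_flush_all_batches(ctx, false); /* batch freed; f still valid  */
    /* ...hand f to panfrost_fence_create(), then drop our reference */
    panfrost_batch_fence_unreference(f);
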
if (!is_scanout || has_draws)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
else
assert(!ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer &&
!ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
struct panfrost_query *query = (struct panfrost_query *) q;
+ struct panfrost_context *ctx = pan_context(pipe);
switch (query->type) {
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
/* Flush first */
- panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
/* Read back the query results */
unsigned *result = (unsigned *) query->transfer.cpu;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
- panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
vresult->u64 = query->end - query->start;
break;
return ret;
}
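
Both query paths lean on the same contract: once the synchronous flush returns, every job that could have touched the counters has retired, so the results can be read with no further synchronization. A sketch of the predicate read-back, assuming the hardware has written the hit count into the query's transfer BO by that point:

    panfrost_flush_all_batches(ctx, true);               /* drain and wait   */
    unsigned hits = *((unsigned *) query->transfer.cpu);
    vresult->b = hits > 0;                               /* predicate result */
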
-void
+static void
panfrost_batch_submit(struct panfrost_batch *batch)
{
assert(batch);
panfrost_free_batch(batch);
+}
+
+void
+panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait)
+{
+ struct util_dynarray fences, syncobjs;
+
+ if (wait) {
+ util_dynarray_init(&fences, NULL);
+ util_dynarray_init(&syncobjs, NULL);
+ }
+
+ hash_table_foreach(ctx->batches, hentry) {
+ struct panfrost_batch *batch = hentry->data;
+
+ assert(batch);
+
+ if (wait) {
+ panfrost_batch_fence_reference(batch->out_sync);
+ util_dynarray_append(&fences, struct panfrost_batch_fence *,
+ batch->out_sync);
+ util_dynarray_append(&syncobjs, uint32_t,
+ batch->out_sync->syncobj);
+ }
+
+ panfrost_batch_submit(batch);
+ }
+
+ assert(!ctx->batches->entries);
+
/* Collect batch fences before returning */
panfrost_gc_fences(ctx);
+
+ if (!wait)
+ return;
+
+ drmSyncobjWait(pan_screen(ctx->base.screen)->fd,
+ util_dynarray_begin(&syncobjs),
+ util_dynarray_num_elements(&syncobjs, uint32_t),
+ INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);
+
+ util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
+ panfrost_batch_fence_unreference(*fence);
+
+ util_dynarray_fini(&fences);
+ util_dynarray_fini(&syncobjs);
}
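
Callers pick the mode by intent: wait=false is a fire-and-forget flush (the pipe->flush path, which tracks completion through the collected fences instead), while wait=true is for any path about to read GPU output on the CPU. The synchronous mode gathers all syncobjs into a single ioctl rather than waiting per batch; a sketch of the two call-site shapes:

    /* Asynchronous: submit everything, completion observed via fences. */
    panfrost_flush_all_batches(ctx, false);

    /* Synchronous: submit everything, then one drmSyncobjWait() with
     * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL and an INT64_MAX (i.e. unbounded)
     * timeout blocks until every batch has retired. */
    panfrost_flush_all_batches(ctx, true);
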
struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags);
void
-panfrost_batch_submit(struct panfrost_batch *batch);
+panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait);
void
panfrost_batch_set_requirements(struct panfrost_batch *batch);
if (is_bound && (usage & PIPE_TRANSFER_READ)) {
assert(level == 0);
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
}
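
A read mapping of a resource bound to the current framebuffer could otherwise observe stale contents, since pending batches may still write to it; wait=true closes that window. A caller-side sketch through the gallium transfer API (prsrc, box, dst, and size are illustrative):

    struct pipe_transfer *xfer;
    void *map = pipe->transfer_map(pipe, prsrc, 0, PIPE_TRANSFER_READ,
                                   &box, &xfer);
    /* transfer_map flushed and waited internally, so 'map' reflects all
     * previously queued rendering. */
    memcpy(dst, map, size);
    pipe->transfer_unmap(pipe, xfer);
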
/* TODO: Respect usage flags */
bool has_draws = batch->last_job.gpu;
if (has_draws)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
/* We've flushed the original buffer if needed, now trigger a blit */
/* If the blit was successful, flush once more. If it wasn't, well, let
 * the state tracker deal with it. */
if (blit_res)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_flush_all_batches(ctx, true);
return blit_res;
}
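
The mipmap path thus flushes twice around the blit. A compressed view of the sequence, assuming the blit is issued through gallium's util_gen_mipmap() helper (as the blit_res result suggests):

    /* 1. Resolve any pending draws touching the source level. */
    if (has_draws)
            panfrost_flush_all_batches(ctx, true);

    /* 2. The util_gen_mipmap()-style blit queues new jobs... */

    /* 3. ...which are flushed in turn so the generated levels are
     *    resolved before anyone samples them. */
    if (blit_res)
            panfrost_flush_all_batches(ctx, true);
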