pipe_reference(NULL, &fence->reference);
}
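+/* Forward declaration: panfrost_create_batch() adds the FBO BOs as soon as
+ * a batch is created, while the definition stays with the other BO-tracking
+ * helpers below. */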
+static void
+panfrost_batch_add_fbo_bos(struct panfrost_batch *batch);
+
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
const struct pipe_framebuffer_state *key)
{
struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
batch->ctx = ctx;
batch->out_sync = panfrost_create_batch_fence(batch);
util_copy_framebuffer_state(&batch->key, key);
- batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen));
+ /* Preallocate the main pool: every batch has at least one job
+ * structure, so the pool is certain to be used. */
+ panfrost_pool_init(&batch->pool, batch, dev, 0, true);
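+ /* (The trailing boolean selects the pre-allocation described above.) */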
+
+ /* Don't preallocate the invisible pool: not every batch will use it,
+ * particularly when the varyings are larger than the preallocation
+ * and a fresh allocation would be needed anyway. */
+ panfrost_pool_init(&batch->invisible_pool, batch, dev, PAN_BO_INVISIBLE, false);
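+ /* (PAN_BO_INVISIBLE BOs skip the CPU mapping, which suits the
+ * GPU-written varyings this pool holds.) */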
+
+ panfrost_batch_add_fbo_bos(batch);
return batch;
}
struct hash_entry *entry;
/* Remove the entry in the FBO -> batch hash table if the batch
- * matches. This way, next draws/clears targeting this FBO will trigger
- * the creation of a new batch.
+ * matches and drop the context reference. This way, the next
+ * draw/clear targeting this FBO will trigger the creation of a new
+ * batch.
*/
entry = _mesa_hash_table_search(ctx->batches, &batch->key);
if (entry && entry->data == batch)
_mesa_hash_table_remove(ctx->batches, entry);
- /* If this is the bound batch, the panfrost_context parameters are
- * relevant so submitting it invalidates those parameters, but if it's
- * not bound, the context parameters are for some other batch so we
- * can't invalidate them.
- */
- if (ctx->batch == batch) {
- panfrost_invalidate_frame(ctx);
+ if (ctx->batch == batch)
ctx->batch = NULL;
- }
}
#ifdef PAN_BATCH_DEBUG
hash_table_foreach(batch->bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
- hash_table_foreach(batch->pool.bos, entry)
- panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+ panfrost_pool_cleanup(&batch->pool);
+ panfrost_pool_cleanup(&batch->invisible_pool);
util_dynarray_foreach(&batch->dependencies,
struct panfrost_batch_fence *, dep) {
panfrost_batch_fence_unreference(*dep);
}
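+ /* The loop above dropped the fence references; free the array's
+ * backing storage as well. */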
+ util_dynarray_fini(&batch->dependencies);
+
/* The out_sync fence lifetime is different from the batch one
* since other batches might want to wait on the fence of an already
* submitted/signaled batch. All we need to do here is make sure the
panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
}
-void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
+static void
+panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
PAN_BO_ACCESS_VERTEX_TILER |
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
- unsigned shift,
+ unsigned size_per_thread,
unsigned thread_tls_alloc,
unsigned core_count)
{
- unsigned size = panfrost_get_total_stack_size(shift,
+ unsigned size = panfrost_get_total_stack_size(size_per_thread,
thread_tls_alloc,
core_count);
return batch->shared_memory;
}
-struct panfrost_bo *
-panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
-{
- if (batch->tiler_heap)
- return batch->tiler_heap;
-
- batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
- PAN_BO_INVISIBLE |
- PAN_BO_GROWABLE,
- PAN_BO_ACCESS_PRIVATE |
- PAN_BO_ACCESS_RW |
- PAN_BO_ACCESS_VERTEX_TILER |
- PAN_BO_ACCESS_FRAGMENT);
- assert(batch->tiler_heap);
- return batch->tiler_heap;
-}
-
mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
{
if (batch->tiler_meta)
return batch->tiler_meta;
- struct panfrost_bo *tiler_heap;
- tiler_heap = panfrost_batch_get_tiler_heap(batch);
+ struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
struct bifrost_tiler_heap_meta tiler_heap_meta = {
- .heap_size = tiler_heap->size,
- .tiler_heap_start = tiler_heap->gpu,
- .tiler_heap_free = tiler_heap->gpu,
- .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
+ .heap_size = dev->tiler_heap->size,
+ .tiler_heap_start = dev->tiler_heap->gpu,
+ .tiler_heap_free = dev->tiler_heap->gpu,
+ .tiler_heap_end = dev->tiler_heap->gpu + dev->tiler_heap->size,
.unk1 = 0x1,
.unk7e007e = 0x7e007e,
};
.flags = 0x0,
.width = MALI_POSITIVE(batch->key.width),
.height = MALI_POSITIVE(batch->key.height),
- .tiler_heap_meta = panfrost_pool_upload(&batch->pool, &tiler_heap_meta, sizeof(tiler_heap_meta)),
+ .tiler_heap_meta = panfrost_pool_upload_aligned(&batch->pool, &tiler_heap_meta, sizeof(tiler_heap_meta), 64),
};
- batch->tiler_meta = panfrost_pool_upload(&batch->pool, &tiler_meta, sizeof(tiler_meta));
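+ /* Tiler descriptors are uploaded with a conservative 64-byte
+ * alignment, matching the tiler_heap_meta upload above. */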
+ batch->tiler_meta = panfrost_pool_upload_aligned(&batch->pool, &tiler_meta, sizeof(tiler_meta), 64);
return batch->tiler_meta;
}
sizeof(struct mali_single_framebuffer) :
sizeof(struct mali_framebuffer);
- batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);
+ batch->framebuffer = panfrost_pool_alloc_aligned(&batch->pool, size, 64);
/* Tag the pointer */
if (!(dev->quirks & MIDGARD_SFBD))
blend_shader = bo->gpu | b->first_tag;
}
- struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
- 4 * 4 * 6 * rsrc->damage.inverted_len);
+ struct panfrost_transfer transfer = panfrost_pool_alloc_aligned(&batch->pool,
+ 4 * 4 * 6 * rsrc->damage.inverted_len, 64);
for (unsigned i = 0; i < rsrc->damage.inverted_len; ++i) {
float *o = (float *) (transfer.cpu + (4 * 4 * 6 * i));
submit.jc = first_job_desc;
submit.requirements = reqs;
- bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
+ bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
+ panfrost_pool_num_bos(&batch->invisible_pool) +
+ batch->bos->entries + 1,
+ sizeof(*bo_handles));
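+ /* The extra slot reserved above (+ 1) holds the device tiler heap
+ * handle, attached below for tiler jobs. */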
assert(bo_handles);
hash_table_foreach(batch->bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
- hash_table_foreach(batch->pool.bos, entry)
- panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
+ panfrost_pool_get_bo_handles(&batch->pool, bo_handles + submit.bo_handle_count);
+ submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);
+ panfrost_pool_get_bo_handles(&batch->invisible_pool, bo_handles + submit.bo_handle_count);
+ submit.bo_handle_count += panfrost_pool_num_bos(&batch->invisible_pool);
+
+ /* Used by all tiler jobs (XXX: skip for compute-only) */
+ if (!(reqs & PANFROST_JD_REQ_FS))
+ bo_handles[submit.bo_handle_count++] = dev->tiler_heap->gem_handle;
submit.bo_handles = (u64) (uintptr_t) bo_handles;
ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
{
struct panfrost_context *ctx = batch->ctx;
- if (ctx->rasterizer && ctx->rasterizer->base.multisample)
+ if (ctx->rasterizer->base.multisample)
batch->requirements |= PAN_REQ_MSAA;
if (ctx->depth_stencil && ctx->depth_stencil->base.depth.writemask) {