unsigned vertex_count)
{
struct midgard_tiler_descriptor t = {};
+ struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
t.hierarchy_mask =
panfrost_choose_hierarchy_mask(width, height, vertex_count);
/* Sanity check */
if (t.hierarchy_mask) {
- assert(ctx->tiler_polygon_list.bo->size >= (header_size + body_size));
-
- /* Specify allocated tiler structures */
- t.polygon_list = ctx->tiler_polygon_list.bo->gpu;
+ t.polygon_list = panfrost_job_get_polygon_list(batch, header_size + body_size);
/* Allow the entire tiler heap */
t.heap_start = ctx->tiler_heap.bo->gpu;
panfrost_drm_free_slab(screen, &panfrost->scratchpad);
panfrost_drm_free_slab(screen, &panfrost->shaders);
panfrost_drm_free_slab(screen, &panfrost->tiler_heap);
- panfrost_drm_free_slab(screen, &panfrost->tiler_polygon_list);
panfrost_drm_free_slab(screen, &panfrost->tiler_dummy);
ralloc_free(pipe);
panfrost_drm_allocate_slab(screen, &ctx->scratchpad, 64*4, false, 0, 0, 0);
panfrost_drm_allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
panfrost_drm_allocate_slab(screen, &ctx->tiler_heap, 4096, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
- panfrost_drm_allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
panfrost_drm_allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
}
struct panfrost_memory shaders;
struct panfrost_memory scratchpad;
struct panfrost_memory tiler_heap;
- struct panfrost_memory tiler_polygon_list;
struct panfrost_memory tiler_dummy;
struct panfrost_memory depth_stencil_buffer;
panfrost_job_add_bo(job, ctx->shaders.bo);
panfrost_job_add_bo(job, ctx->scratchpad.bo);
panfrost_job_add_bo(job, ctx->tiler_heap.bo);
- panfrost_job_add_bo(job, ctx->tiler_polygon_list.bo);
+ panfrost_job_add_bo(job, job->polygon_list);
if (job->first_job.gpu) {
ret = panfrost_drm_submit_job(ctx, job->first_job.gpu, 0);
BITSET_SET(screen->free_transient, *index);
}
+ /* Unreference the polygon list */
+ panfrost_bo_unreference(ctx->base.screen, job->polygon_list);
+
_mesa_hash_table_remove_key(ctx->jobs, &job->key);
if (ctx->job == job)
_mesa_set_add(job->bos, bo);
}
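Taken together, the hunks above give the batch ownership of its polygon list
for exactly one submit cycle. A sketch of the lifetime implied by this patch
(a flow summary over names from the hunks, not new API):

    /* draw:     panfrost_job_get_polygon_list(batch, header_size + body_size)
     *             allocates the invisible BO on first use
     * submit:   panfrost_job_add_bo(job, job->polygon_list)
     *             records the BO for the kernel submission
     * job free: panfrost_bo_unreference(ctx->base.screen, job->polygon_list)
     *             drops the reference; the BO cache makes the per-batch
     *             allocate/free cycle cheap */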
+/* Returns the polygon list's GPU address if it has already been allocated,
+ * allocating it first otherwise. Allocating and freeing the BO directly
+ * (rather than suballocating from a context-wide slab) is perfectly fast,
+ * since we'll hit the BO cache and this is one-per-batch anyway. */
+
+mali_ptr
+panfrost_job_get_polygon_list(struct panfrost_job *batch, unsigned size)
+{
+ if (batch->polygon_list) {
+ assert(batch->polygon_list->size >= size);
+ } else {
+ struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);
+
+ /* Create the BO as invisible, as there's no reason to map */
+
+ batch->polygon_list = panfrost_drm_create_bo(screen,
+ size, PAN_ALLOCATE_INVISIBLE);
+ }
+
+ return batch->polygon_list->gpu;
+}
+
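For reference, the helper's two call sites in this patch differ only in the
size they request; both appear in the hunks of this series:

    /* Descriptor emission sizes the list for real: */
    t.polygon_list = panfrost_job_get_polygon_list(batch,
                    header_size + body_size);

    /* The scoreboard only needs the GPU address, so it passes 0 and
     * relies on the allocation the descriptor path already made: */
    mali_ptr polygon_list = panfrost_job_get_polygon_list(batch, 0);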
void
panfrost_flush_jobs_writing_resource(struct panfrost_context *panfrost,
struct pipe_resource *prsc)
/* Within the topmost transient BO, how much has been used? */
unsigned transient_offset;
+
+ /* Polygon list bound to the batch, or NULL if none bound yet */
+ struct panfrost_bo *polygon_list;
};
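Note that the lazy-allocation scheme relies on jobs being zero-initialized,
so a NULL polygon_list means "not yet allocated". A minimal sketch, assuming
jobs are rzalloc'd as is done elsewhere in the driver:

    /* rzalloc zeroes the struct, so polygon_list starts out NULL and the
     * first panfrost_job_get_polygon_list() call will allocate the BO */
    struct panfrost_job *job = rzalloc(ctx, struct panfrost_job);
    assert(job->polygon_list == NULL);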
/* Functions for managing the above */
panfrost_job_set_requirements(struct panfrost_context *ctx,
struct panfrost_job *job);
+mali_ptr
+panfrost_job_get_polygon_list(struct panfrost_job *batch, unsigned size);
+
void
panfrost_job_clear(struct panfrost_context *ctx,
struct panfrost_job *job,
if (!batch->last_tiler.gpu)
return;
- /* Okay, we do. Let's generate it */
+ /* Okay, we do. Let's generate it. We need the job's polygon list
+ * regardless of its size, so passing 0 below just fetches the address
+ * of whatever the descriptor path already allocated. */
struct panfrost_context *ctx = batch->ctx;
- mali_ptr polygon_list = ctx->tiler_polygon_list.bo->gpu;
+ mali_ptr polygon_list = panfrost_job_get_polygon_list(batch, 0);
struct panfrost_transfer job =
panfrost_set_value_job(ctx, polygon_list);