t.polygon_list_size = panfrost_tiler_full_size(
width, height, t.hierarchy_mask, hierarchy);
- /* Sanity check */
-
if (vertex_count) {
- struct panfrost_bo *tiler_heap;
-
- tiler_heap = panfrost_batch_get_tiler_heap(batch);
t.polygon_list = panfrost_batch_get_polygon_list(batch,
header_size +
t.polygon_list_size);
- /* Allow the entire tiler heap */
- t.heap_start = tiler_heap->gpu;
- t.heap_end = tiler_heap->gpu + tiler_heap->size;
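+ /* Allow the entire tiler heap */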
+ t.heap_start = device->tiler_heap->gpu;
+ t.heap_end = device->tiler_heap->gpu + device->tiler_heap->size;
} else {
struct panfrost_bo *tiler_dummy;
return batch->shared_memory;
}
-struct panfrost_bo *
-panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
-{
- if (batch->tiler_heap)
- return batch->tiler_heap;
-
- batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
- PAN_BO_INVISIBLE |
- PAN_BO_GROWABLE,
- PAN_BO_ACCESS_PRIVATE |
- PAN_BO_ACCESS_RW |
- PAN_BO_ACCESS_VERTEX_TILER |
- PAN_BO_ACCESS_FRAGMENT);
- assert(batch->tiler_heap);
- return batch->tiler_heap;
-}
-
mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
{
if (batch->tiler_meta)
return batch->tiler_meta;
- struct panfrost_bo *tiler_heap;
- tiler_heap = panfrost_batch_get_tiler_heap(batch);
+ struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
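+ /* Describe the device-wide heap to the tiler. The free pointer starts
+ * at the heap base; since only one job chain uses the tiler at a time,
+ * each chain reuses the heap from the start. */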
struct bifrost_tiler_heap_meta tiler_heap_meta = {
- .heap_size = tiler_heap->size,
- .tiler_heap_start = tiler_heap->gpu,
- .tiler_heap_free = tiler_heap->gpu,
- .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
+ .heap_size = dev->tiler_heap->size,
+ .tiler_heap_start = dev->tiler_heap->gpu,
+ .tiler_heap_free = dev->tiler_heap->gpu,
+ .tiler_heap_end = dev->tiler_heap->gpu + dev->tiler_heap->size,
.unk1 = 0x1,
.unk7e007e = 0x7e007e,
};
submit.jc = first_job_desc;
submit.requirements = reqs;
- bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
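+ /* One extra slot for the device-wide tiler heap handle added below */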
+ bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries + 1, sizeof(*bo_handles));
assert(bo_handles);
hash_table_foreach(batch->bos, entry)
hash_table_foreach(batch->invisible_pool.bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
+ /* Used by all tiler jobs (XXX: skip for compute-only) */
+ if (!(reqs & PANFROST_JD_REQ_FS))
+ bo_handles[submit.bo_handle_count++] = dev->tiler_heap->gem_handle;
+
submit.bo_handles = (u64) (uintptr_t) bo_handles;
ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
free(bo_handles);
} bo_cache;
struct pan_blit_shaders blit_shaders;
+
+ /* Tiler heap shared across all tiler jobs, allocated against the
+ * device since there's only a single tiler. Since this is invisible to
+ * the CPU, it's okay for multiple contexts to reference it
+ * simultaneously; by keeping it on the device struct, we eliminate a
+ * costly per-context allocation. */
+
+ struct panfrost_bo *tiler_heap;
};
void
for (unsigned i = 0; i < ARRAY_SIZE(dev->bo_cache.buckets); ++i)
list_inithead(&dev->bo_cache.buckets[i]);
+
+ /* The tiler heap is required internally by the tiler, which can only
+ * be active for a single job chain at once, so a single heap can be
+ * shared across batches/contexts */
+
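+ /* 4096 * 4096 = 16MB ceiling; PAN_BO_GROWABLE lets the kernel back
+ * pages lazily as the tiler faults them in, and PAN_BO_INVISIBLE skips
+ * the CPU mapping we never need */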
+ dev->tiler_heap = panfrost_bo_create(dev, 4096 * 4096,
+ PAN_BO_INVISIBLE | PAN_BO_GROWABLE);
}
void
panfrost_close_device(struct panfrost_device *dev)
{
panfrost_bo_unreference(dev->blit_shaders.bo);
+ panfrost_bo_unreference(dev->tiler_heap);
panfrost_bo_cache_evict_all(dev);
pthread_mutex_destroy(&dev->bo_cache.lock);
drmFreeVersion(dev->kernel_version);