* into wherever we left off. If there isn't space, we allocate a new entry
* into the pool and copy there */
+struct pan_pool
+panfrost_create_pool(void *memctx)
+{
+ struct pan_pool pool = {
+ .transient_offset = 0,
+ .transient_bo = NULL
+ };
+
+ pool.bos = _mesa_hash_table_create(memctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ return pool;
+}
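The constructor returns the pool by value, so an owner embeds it directly rather than holding a pointer; memctx only parents the hash table. A usage sketch, assuming a ralloc-style context as the batch code below uses:

    struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
    batch->pool = panfrost_create_pool(batch); /* table freed with the batch */
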
+
struct panfrost_transfer
panfrost_allocate_transient(struct panfrost_batch *batch, size_t sz)
{
unsigned offset = 0;
- bool fits_in_current = (batch->transient_offset + sz) < TRANSIENT_SLAB_SIZE;
+ bool fits_in_current = (batch->pool.transient_offset + sz) < TRANSIENT_SLAB_SIZE;
- if (likely(batch->transient_bo && fits_in_current)) {
+ if (likely(batch->pool.transient_bo && fits_in_current)) {
/* We can reuse the current BO, so get it */
- bo = batch->transient_bo;
+ bo = batch->pool.transient_bo;
/* Use the specified offset */
- offset = batch->transient_offset;
- batch->transient_offset = offset + sz;
+ offset = batch->pool.transient_offset;
+ batch->pool.transient_offset = offset + sz;
} else {
size_t bo_sz = sz < TRANSIENT_SLAB_SIZE ?
TRANSIENT_SLAB_SIZE : ALIGN_POT(sz, 4096);
bo = panfrost_batch_create_bo(batch, bo_sz, 0,
                              PAN_BO_ACCESS_PRIVATE |
                              PAN_BO_ACCESS_RW |
                              PAN_BO_ACCESS_VERTEX_TILER |
                              PAN_BO_ACCESS_FRAGMENT);
if (sz < TRANSIENT_SLAB_SIZE) {
- batch->transient_bo = bo;
- batch->transient_offset = offset + sz;
+ batch->pool.transient_bo = bo;
+ batch->pool.transient_offset = offset + sz;
}
}
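The transient path above is a bump allocator over fixed-size slabs: reuse the current BO while the request fits, otherwise create a fresh one, and only adopt the new BO as the current slab when it is slab-sized (oversized requests get a dedicated page-aligned BO that is not reused). A self-contained model of that logic, with hypothetical stand-ins for the BO type and creation call:

    #include <stdlib.h>

    #define SLAB_SIZE (4 * 4096) /* stand-in for TRANSIENT_SLAB_SIZE */
    #define ALIGN_POT(x, pot) (((x) + (pot) - 1) & ~((size_t)(pot) - 1))

    struct slab { size_t size; }; /* stand-in for panfrost_bo */

    static struct slab *
    slab_new(size_t sz)
    {
            struct slab *s = malloc(sizeof(*s));
            s->size = sz;
            return s;
    }

    /* Returns the offset of the suballocation within *out_slab, mirroring
     * the branch structure of panfrost_allocate_transient */
    static size_t
    bump_alloc(struct slab **current, size_t *cursor, size_t sz,
               struct slab **out_slab)
    {
            if (*current && *cursor + sz < SLAB_SIZE) {
                    size_t offset = *cursor; /* reuse the current slab */
                    *cursor = offset + sz;
                    *out_slab = *current;
                    return offset;
            }

            /* Oversized requests get their own page-aligned slab */
            size_t slab_sz = sz < SLAB_SIZE ? SLAB_SIZE : ALIGN_POT(sz, 4096);
            struct slab *slab = slab_new(slab_sz);

            if (sz < SLAB_SIZE) { /* adopt as the new current slab */
                    *current = slab;
                    *cursor = sz;
            }

            *out_slab = slab;
            return 0;
    }
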
struct panfrost_batch;
+/* Represents a pool of memory that can only grow, used to allocate objects
+ * with the same lifetime as the pool itself. In OpenGL, a pool is owned by the
+ * batch for transient structures. In Vulkan, it may be owned by e.g. the
+ * command pool */
+
+struct pan_pool {
+ /* panfrost_bo -> access_flags owned by the pool */
+ struct hash_table *bos;
+
+ /* Current transient BO */
+ struct panfrost_bo *transient_bo;
+
+ /* Within the topmost transient BO, how much has been used? */
+ unsigned transient_offset;
+};
+
+struct pan_pool
+panfrost_create_pool(void *memctx);
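The bos table maps each panfrost_bo to its access flags by packing the flags into the pointer-sized value slot, the same convention batch->bos already uses. A sketch of the insert/lookup pattern this implies (pan_pool_add_bo and pan_pool_get_flags are hypothetical names, not part of this patch):

    static void
    pan_pool_add_bo(struct pan_pool *pool, struct panfrost_bo *bo,
                    uint32_t flags)
    {
            /* The flags travel in the value pointer; nothing is allocated */
            _mesa_hash_table_insert(pool->bos, bo, (void *)(uintptr_t)flags);
    }

    static uint32_t
    pan_pool_get_flags(struct pan_pool *pool, struct panfrost_bo *bo)
    {
            struct hash_entry *entry = _mesa_hash_table_search(pool->bos, bo);
            return entry ? (uint32_t)(uintptr_t)entry->data : 0;
    }
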
+
/* Represents a fat pointer for GPU-mapped memory, returned from the transient
* allocator and not used for much else */
batch->ctx = ctx;
batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ _mesa_key_pointer_equal);
batch->minx = batch->miny = ~0;
batch->maxx = batch->maxy = 0;
- batch->transient_offset = 0;
batch->out_sync = panfrost_create_batch_fence(batch);
util_copy_framebuffer_state(&batch->key, key);
+ batch->pool = panfrost_create_pool(batch);
+
return batch;
}
hash_table_foreach(batch->bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+ hash_table_foreach(batch->pool.bos, entry)
+ panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+
util_dynarray_foreach(&batch->dependencies,
struct panfrost_batch_fence *, dep) {
panfrost_batch_fence_unreference(*dep);
batch->ctx->wallpaper_batch = NULL;
}
+static void
+panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles,
+                         unsigned idx)
+{
+ struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+ uint32_t flags = (uintptr_t)entry->data;
+
+ assert(bo->gem_handle > 0);
+ bo_handles[idx] = bo->gem_handle;
+
+ /* Update the BO access flags so that panfrost_bo_wait() knows
+ * about all pending accesses.
+ * We only keep the READ/WRITE info since this is all the BO
+ * wait logic cares about.
+ * We also preserve existing flags as this batch might not
+ * be the first one to access the BO.
+ */
+ bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
+}
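Note the |= with the RW mask: gpu_access accumulates across batches rather than being overwritten. A short illustration, assuming PAN_BO_ACCESS_RW is the READ | WRITE mask from pan_bo.h:

    /* A BO read by one batch and written by a later one ends up flagged for
     * both, so panfrost_bo_wait() sees the union of pending accesses */
    bo->gpu_access |= PAN_BO_ACCESS_READ & PAN_BO_ACCESS_RW;  /* first batch */
    bo->gpu_access |= PAN_BO_ACCESS_WRITE & PAN_BO_ACCESS_RW; /* second batch */
    assert(bo->gpu_access & PAN_BO_ACCESS_READ);
    assert(bo->gpu_access & PAN_BO_ACCESS_WRITE);
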
+
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
mali_ptr first_job_desc,
submit.jc = first_job_desc;
submit.requirements = reqs;
- bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
+ bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries,
+                     sizeof(*bo_handles));
assert(bo_handles);
- hash_table_foreach(batch->bos, entry) {
- struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
- uint32_t flags = (uintptr_t)entry->data;
-
- assert(bo->gem_handle > 0);
- bo_handles[submit.bo_handle_count++] = bo->gem_handle;
+ hash_table_foreach(batch->bos, entry)
+ panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
- /* Update the BO access flags so that panfrost_bo_wait() knows
- * about all pending accesses.
- * We only keep the READ/WRITE info since this is all the BO
- * wait logic cares about.
- * We also preserve existing flags as this batch might not
- * be the first one to access the BO.
- */
- bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
- }
+ hash_table_foreach(batch->pool.bos, entry)
+ panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
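Each loop appends exactly one handle per table entry, so after both loops the running count matches the calloc size above; duplicate handles would only arise if a BO were tracked in both tables, which the split (pool-owned vs. batch-referenced) is meant to prevent. A sanity-check sketch:

    assert(submit.bo_handle_count ==
           batch->bos->entries + batch->pool.bos->entries);
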
submit.bo_handles = (u64) (uintptr_t) bo_handles;
ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
/* The job index of the WRITE_VALUE job (before it has been created) */
unsigned write_value_index;
- /* BOs referenced -- will be used for flushing logic */
+ /* BOs referenced by the batch but not owned by the pool -- used for
+  * flushing logic */
struct hash_table *bos;
- /* Current transient BO */
- struct panfrost_bo *transient_bo;
-
- /* Within the topmost transient BO, how much has been used? */
- unsigned transient_offset;
+ /* Pool owned by this batch (released when the batch is released),
+  * used for temporary descriptors */
+ struct pan_pool pool;
/* Polygon list bound to the batch, or NULL if none bound yet */
struct panfrost_bo *polygon_list;