panfrost: Introduce pan_pool struct
author Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Tue, 7 Jul 2020 18:46:40 +0000 (14:46 -0400)
committer Marge Bot <eric+marge@anholt.net>
Thu, 9 Jul 2020 14:54:38 +0000 (14:54 +0000)
As a first step towards separating pools from batches, let's collect
pool-related state together.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5794>

src/gallium/drivers/panfrost/pan_allocate.c
src/gallium/drivers/panfrost/pan_allocate.h
src/gallium/drivers/panfrost/pan_job.c
src/gallium/drivers/panfrost/pan_job.h

index b16a1253ac2f5e5c2cd93fe4b28bc3e414b683ec..a0ca7eb0e68f81cc0e9b8d3ee65300ca2dbf5120 100644
--- a/src/gallium/drivers/panfrost/pan_allocate.c
+++ b/src/gallium/drivers/panfrost/pan_allocate.c
  * into wherever we left off. If there isn't space, we allocate a new entry
  * into the pool and copy there */
 
+struct pan_pool
+panfrost_create_pool(void *memctx)
+{
+        struct pan_pool pool = {
+                .transient_offset = 0,
+                .transient_bo = NULL
+        };
+
+        pool.bos = _mesa_hash_table_create(memctx, _mesa_hash_pointer,
+                        _mesa_key_pointer_equal);
+
+
+        return pool;
+}
+
 struct panfrost_transfer
 panfrost_allocate_transient(struct panfrost_batch *batch, size_t sz)
 {
@@ -50,15 +65,15 @@ panfrost_allocate_transient(struct panfrost_batch *batch, size_t sz)
 
         unsigned offset = 0;
 
-        bool fits_in_current = (batch->transient_offset + sz) < TRANSIENT_SLAB_SIZE;
+        bool fits_in_current = (batch->pool.transient_offset + sz) < TRANSIENT_SLAB_SIZE;
 
-        if (likely(batch->transient_bo && fits_in_current)) {
+        if (likely(batch->pool.transient_bo && fits_in_current)) {
                 /* We can reuse the current BO, so get it */
-                bo = batch->transient_bo;
+                bo = batch->pool.transient_bo;
 
                 /* Use the specified offset */
-                offset = batch->transient_offset;
-                batch->transient_offset = offset + sz;
+                offset = batch->pool.transient_offset;
+                batch->pool.transient_offset = offset + sz;
         } else {
                 size_t bo_sz = sz < TRANSIENT_SLAB_SIZE ?
                                TRANSIENT_SLAB_SIZE : ALIGN_POT(sz, 4096);
@@ -77,8 +92,8 @@ panfrost_allocate_transient(struct panfrost_batch *batch, size_t sz)
                                               PAN_BO_ACCESS_FRAGMENT);
 
                 if (sz < TRANSIENT_SLAB_SIZE) {
-                        batch->transient_bo = bo;
-                        batch->transient_offset = offset + sz;
+                        batch->pool.transient_bo = bo;
+                        batch->pool.transient_offset = offset + sz;
                 }
         }
 
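For context, the allocation policy above is a plain bump allocator over slab-sized BOs: the current slab is reused while requests fit, a fresh slab is started otherwise, and oversized requests get a dedicated page-aligned BO that never becomes the bump target. A minimal self-contained sketch of that policy, with malloc standing in for panfrost_bo_create and illustrative names (toy_pool, SLAB_SIZE) rather than the driver's:

#include <stdlib.h>
#include <stddef.h>

#define SLAB_SIZE (4096 * 4)                 /* stand-in for TRANSIENT_SLAB_SIZE */
#define ALIGN_POT(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct toy_pool {
        void *current;                       /* stand-in for pool->transient_bo */
        size_t offset;                       /* stand-in for pool->transient_offset */
};

static void *
toy_pool_alloc(struct toy_pool *pool, size_t sz)
{
        /* Reuse the current slab if the request still fits */
        if (pool->current && pool->offset + sz < SLAB_SIZE) {
                void *ptr = (char *)pool->current + pool->offset;
                pool->offset += sz;
                return ptr;
        }

        /* Otherwise grab a new backing buffer: a fresh slab for small
         * requests, or a dedicated page-aligned buffer for large ones.
         * The real pool records the new BO in its bos table so it can be
         * unreferenced when the pool's owner is destroyed. */
        size_t buf_sz = sz < SLAB_SIZE ? SLAB_SIZE : ALIGN_POT(sz, 4096);
        void *buf = malloc(buf_sz);

        /* Only slab-sized buffers become the new bump target; oversized
         * buffers are one-shot, as in panfrost_allocate_transient */
        if (buf && sz < SLAB_SIZE) {
                pool->current = buf;
                pool->offset = sz;
        }

        return buf;
}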
index f18218fb32a11c2443cf9884fe07b78d4afc5b3c..2eff640045cc72cbb1b2757626664adc13300a43 100644
--- a/src/gallium/drivers/panfrost/pan_allocate.h
+++ b/src/gallium/drivers/panfrost/pan_allocate.h
 
 struct panfrost_batch;
 
+/* Represents a pool of memory that can only grow, used to allocate objects
+ * with the same lifetime as the pool itself. In OpenGL, a pool is owned by the
+ * batch for transient structures. In Vulkan, it may be owned by e.g. the
+ * command pool */
+
+struct pan_pool {
+        /* panfrost_bo -> access_flags owned by the pool */
+        struct hash_table *bos;
+
+        /* Current transient BO */
+        struct panfrost_bo *transient_bo;
+
+        /* Within the topmost transient BO, how much has been used? */
+        unsigned transient_offset;
+};
+
+struct pan_pool
+panfrost_create_pool(void *memctx);
+
 /* Represents a fat pointer for GPU-mapped memory, returned from the transient
  * allocator and not used for much else */
 
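The memctx parameter leans on Mesa's ralloc hierarchy: the pool's BO table is allocated against whatever owns the pool, so the table is freed along with its owner, while the BOs it references still need explicit unreferencing (as panfrost_free_batch() does in pan_job.c below). A hedged sketch of that contract, assuming only Mesa's util/ralloc and util/hash_table headers; the function and variable names here are illustrative, not the driver's:

#include "util/ralloc.h"
#include "util/hash_table.h"

static void
pool_lifetime_example(void)
{
        /* Any ralloc context can serve as memctx; for GL it is the batch */
        void *owner = ralloc_context(NULL);

        struct hash_table *bos =
                _mesa_hash_table_create(owner, _mesa_hash_pointer,
                                        _mesa_key_pointer_equal);
        (void)bos;

        /* ... BOs would be added to the table as they are allocated ... */

        /* Freeing the owner frees the table with it; any referenced BOs
         * must still be unreferenced explicitly beforehand */
        ralloc_free(owner);
}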
index 3846417dfe2485a3b3a4c8c9b83854cf9ec1d18e..f5cfc6a1ec800533abf7ec9e49e2be1e191819ce 100644
--- a/src/gallium/drivers/panfrost/pan_job.c
+++ b/src/gallium/drivers/panfrost/pan_job.c
@@ -109,15 +109,16 @@ panfrost_create_batch(struct panfrost_context *ctx,
         batch->ctx = ctx;
 
         batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
-                                             _mesa_key_pointer_equal);
+                        _mesa_key_pointer_equal);
 
         batch->minx = batch->miny = ~0;
         batch->maxx = batch->maxy = 0;
-        batch->transient_offset = 0;
 
         batch->out_sync = panfrost_create_batch_fence(batch);
         util_copy_framebuffer_state(&batch->key, key);
 
+        batch->pool = panfrost_create_pool(batch);
+
         return batch;
 }
 
@@ -176,6 +177,9 @@ panfrost_free_batch(struct panfrost_batch *batch)
         hash_table_foreach(batch->bos, entry)
                 panfrost_bo_unreference((struct panfrost_bo *)entry->key);
 
+        hash_table_foreach(batch->pool.bos, entry)
+                panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+
         util_dynarray_foreach(&batch->dependencies,
                               struct panfrost_batch_fence *, dep) {
                 panfrost_batch_fence_unreference(*dep);
@@ -892,6 +896,25 @@ panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
         batch->ctx->wallpaper_batch = NULL;
 }
 
+static void
+panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
+{
+        struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+        uint32_t flags = (uintptr_t)entry->data;
+
+        assert(bo->gem_handle > 0);
+        bo_handles[idx] = bo->gem_handle;
+
+        /* Update the BO access flags so that panfrost_bo_wait() knows
+         * about all pending accesses.
+         * We only keep the READ/WRITE info since this is all the BO
+         * wait logic cares about.
+         * We also preserve existing flags as this batch might not
+         * be the first one to access the BO.
+         */
+        bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
+}
+
 static int
 panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                             mali_ptr first_job_desc,
@@ -935,25 +958,14 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
         submit.jc = first_job_desc;
         submit.requirements = reqs;
 
-        bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
+        bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
         assert(bo_handles);
 
-        hash_table_foreach(batch->bos, entry) {
-                struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
-                uint32_t flags = (uintptr_t)entry->data;
-
-                assert(bo->gem_handle > 0);
-                bo_handles[submit.bo_handle_count++] = bo->gem_handle;
+        hash_table_foreach(batch->bos, entry)
+                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
 
-                /* Update the BO access flags so that panfrost_bo_wait() knows
-                 * about all pending accesses.
-                 * We only keep the READ/WRITE info since this is all the BO
-                 * wait logic cares about.
-                 * We also preserve existing flags as this batch might not
-                 * be the first one to access the BO.
-                 */
-                bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
-        }
+        hash_table_foreach(batch->pool.bos, entry)
+                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
 
         submit.bo_handles = (u64) (uintptr_t) bo_handles;
         ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
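The flag handling described in panfrost_batch_record_bo()'s comment can be shown in isolation: only the READ/WRITE bits feed bo->gpu_access, and each batch ORs its bits in rather than overwriting, so earlier accesses survive. A self-contained sketch with illustrative bit values, not the real PAN_BO_ACCESS_* definitions:

#include <stdint.h>
#include <assert.h>

#define ACCESS_READ   (1u << 0)
#define ACCESS_WRITE  (1u << 1)
#define ACCESS_VERTEX (1u << 2)              /* ignored by the wait logic */
#define ACCESS_RW     (ACCESS_READ | ACCESS_WRITE)

int main(void)
{
        uint32_t gpu_access = 0;

        /* Batch 1 only reads the BO from its vertex/tiler job */
        gpu_access |= (ACCESS_READ | ACCESS_VERTEX) & ACCESS_RW;

        /* Batch 2 later writes the same BO; the earlier READ is preserved */
        gpu_access |= ACCESS_WRITE & ACCESS_RW;

        assert(gpu_access == (ACCESS_READ | ACCESS_WRITE));
        return 0;
}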
index f9499fc1b90600690fdd485e5482f564439caeb3..c0a569bbf64956c54b1dce83c5278735e3c32d07 100644
--- a/src/gallium/drivers/panfrost/pan_job.h
+++ b/src/gallium/drivers/panfrost/pan_job.h
@@ -115,14 +115,11 @@ struct panfrost_batch {
         /* The job index of the WRITE_VALUE job (before it has been created) */
         unsigned write_value_index;
 
-        /* BOs referenced -- will be used for flushing logic */
+        /* BOs referenced not in the pool */
         struct hash_table *bos;
 
-        /* Current transient BO */
-       struct panfrost_bo *transient_bo;
-
-        /* Within the topmost transient BO, how much has been used? */
-        unsigned transient_offset;
+        /* Pool owned by this batch (released when the batch is released) used for temporary descriptors */
+        struct pan_pool pool;
 
         /* Polygon list bound to the batch, or NULL if none bound yet */
         struct panfrost_bo *polygon_list;