panfrost: Avoid accessing pan_pool fields directly
author    Boris Brezillon <boris.brezillon@collabora.com>
          Mon, 24 Aug 2020 09:48:10 +0000 (11:48 +0200)
committer Marge Bot <eric+marge@anholt.net>
          Fri, 28 Aug 2020 19:18:08 +0000 (19:18 +0000)
Having panfrost_batch access pan_pool fields directly makes it hard to
change the pan_pool internals, for instance to swap the hash table for
a dynamic array. Let's hide the pan_pool internals behind helpers that
do what we need.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6494>
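To illustrate the point: once callers go through the helpers, the pool's BO
tracking could, say, move from a hash table to a util_dynarray without
touching panfrost_batch at all. A rough sketch of what such a follow-up might
look like (the dynarray-backed pan_pool layout here is hypothetical and not
part of this series; it assumes mesa's util/u_dynarray.h and the existing
panfrost_bo definitions):

#include "util/u_dynarray.h"

struct pan_pool {
        /* Hypothetical: BOs tracked in a dynamic array of panfrost_bo *
         * instead of a hash table; other fields unchanged. */
        struct util_dynarray bos;
};

static inline unsigned
panfrost_pool_num_bos(struct pan_pool *pool)
{
        return util_dynarray_num_elements(&pool->bos, struct panfrost_bo *);
}

void
panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles)
{
        unsigned idx = 0;

        util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
                /* Same contract as the hash-table version below: record the
                 * GEM handle and mark the BO as GPU-accessed. */
                handles[idx++] = (*bo)->gem_handle;
                (*bo)->gpu_access |= PAN_BO_ACCESS_RW;
        }
}

void
panfrost_pool_cleanup(struct pan_pool *pool)
{
        /* Drop the pool's BO references, then release the array itself. */
        util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo)
                panfrost_bo_unreference(*bo);
        util_dynarray_fini(&pool->bos);
}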

src/gallium/drivers/panfrost/pan_job.c
src/panfrost/lib/pan_pool.c
src/panfrost/lib/pan_pool.h

index 43aee264b3c394d99eb8abffe719ba2382f4ff3f..7db9ce936a5b6d5fe4a430d2dfb37d5115854fe3 100644 (file)
@@ -175,11 +175,8 @@ panfrost_free_batch(struct panfrost_batch *batch)
         hash_table_foreach(batch->bos, entry)
                 panfrost_bo_unreference((struct panfrost_bo *)entry->key);
 
-        hash_table_foreach(batch->pool.bos, entry)
-                panfrost_bo_unreference((struct panfrost_bo *)entry->key);
-
-        hash_table_foreach(batch->invisible_pool.bos, entry)
-                panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+        panfrost_pool_cleanup(&batch->pool);
+        panfrost_pool_cleanup(&batch->invisible_pool);
 
         util_dynarray_foreach(&batch->dependencies,
                               struct panfrost_batch_fence *, dep) {
@@ -980,17 +977,19 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
         submit.jc = first_job_desc;
         submit.requirements = reqs;
 
-        bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries + 1, sizeof(*bo_handles));
+        bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
+                            panfrost_pool_num_bos(&batch->invisible_pool) +
+                            batch->bos->entries + 1,
+                            sizeof(*bo_handles));
         assert(bo_handles);
 
         hash_table_foreach(batch->bos, entry)
                 panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
 
-        hash_table_foreach(batch->pool.bos, entry)
-                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
-
-        hash_table_foreach(batch->invisible_pool.bos, entry)
-                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
+        panfrost_pool_get_bo_handles(&batch->pool, bo_handles + submit.bo_handle_count);
+        submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);
+        panfrost_pool_get_bo_handles(&batch->invisible_pool, bo_handles + submit.bo_handle_count);
+        submit.bo_handle_count += panfrost_pool_num_bos(&batch->invisible_pool);
 
         /* Used by all tiler jobs (XXX: skip for compute-only) */
         if (!(reqs & PANFROST_JD_REQ_FS))
index 5fc7cc041f649b7e8f72bf1916055d463ba85dc5..1188063772051855872082553f8917ec57f5ddb5 100644 (file)
@@ -71,6 +71,38 @@ panfrost_pool_init(struct pan_pool *pool, void *memctx,
                 panfrost_pool_alloc_backing(pool, TRANSIENT_SLAB_SIZE);
 }
 
+static void delete_bo_entry(struct hash_entry *entry)
+{
+        panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+}
+
+void
+panfrost_pool_cleanup(struct pan_pool *pool)
+{
+        _mesa_hash_table_destroy(pool->bos, delete_bo_entry);
+}
+
+void
+panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles)
+{
+        unsigned idx = 0;
+        hash_table_foreach(pool->bos, entry) {
+                struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+
+                assert(bo->gem_handle > 0);
+                handles[idx++] = bo->gem_handle;
+
+               /* Update the BO access flags so that panfrost_bo_wait() knows
+                * about all pending accesses.
+                * We only keep the READ/WRITE info since this is all the BO
+                * wait logic cares about.
+                * We also preserve existing flags as this batch might not
+                * be the first one to access the BO.
+                */
+                bo->gpu_access |= PAN_BO_ACCESS_RW;
+        }
+}
+
 struct panfrost_transfer
 panfrost_pool_alloc_aligned(struct pan_pool *pool, size_t sz, unsigned alignment)
 {
index 23dba284425d7884336b8c8db1a36820d95f4198..c3fa2abd0490dc62ba629d79810d4c6eab022c32 100644 (file)
@@ -55,6 +55,18 @@ panfrost_pool_init(struct pan_pool *pool, void *memctx,
                    struct panfrost_device *dev, unsigned create_flags,
                    bool prealloc);
 
+void
+panfrost_pool_cleanup(struct pan_pool *pool);
+
+static inline unsigned
+panfrost_pool_num_bos(struct pan_pool *pool)
+{
+        return pool->bos->entries;
+}
+
+void
+panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles);
+
 /* Represents a fat pointer for GPU-mapped memory, returned from the transient
  * allocator and not used for much else */
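
For context, the caller-side pattern after this change, condensed from the
pan_job.c hunks above (a sketch only; error handling and unrelated setup
omitted):

        /* Submission: size the handle array, then let each pool append its
         * own handles after the batch's. */
        bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
                            panfrost_pool_num_bos(&batch->invisible_pool) +
                            batch->bos->entries + 1,
                            sizeof(*bo_handles));

        panfrost_pool_get_bo_handles(&batch->pool,
                                     bo_handles + submit.bo_handle_count);
        submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);

        /* Teardown: each pool drops its own BO references. */
        panfrost_pool_cleanup(&batch->pool);
        panfrost_pool_cleanup(&batch->invisible_pool);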