hash_table_foreach(batch->bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
- hash_table_foreach(batch->pool.bos, entry)
- panfrost_bo_unreference((struct panfrost_bo *)entry->key);
-
- hash_table_foreach(batch->invisible_pool.bos, entry)
- panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+ panfrost_pool_cleanup(&batch->pool);
+ panfrost_pool_cleanup(&batch->invisible_pool);
util_dynarray_foreach(&batch->dependencies,
struct panfrost_batch_fence *, dep) {
submit.jc = first_job_desc;
submit.requirements = reqs;
- bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries + 1, sizeof(*bo_handles));
+ bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
+ panfrost_pool_num_bos(&batch->invisible_pool) +
+ batch->bos->entries + 1,
+ sizeof(*bo_handles));
assert(bo_handles);
hash_table_foreach(batch->bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
- hash_table_foreach(batch->pool.bos, entry)
- panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
-
- hash_table_foreach(batch->invisible_pool.bos, entry)
- panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
+ panfrost_pool_get_bo_handles(&batch->pool, bo_handles + submit.bo_handle_count);
+ submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);
+ panfrost_pool_get_bo_handles(&batch->invisible_pool, bo_handles + submit.bo_handle_count);
+ submit.bo_handle_count += panfrost_pool_num_bos(&batch->invisible_pool);
/* Used by all tiler jobs (XXX: skip for compute-only) */
if (!(reqs & PANFROST_JD_REQ_FS))
panfrost_pool_alloc_backing(pool, TRANSIENT_SLAB_SIZE);
}
+static void
+delete_bo_entry(struct hash_entry *entry)
+{
+ panfrost_bo_unreference((struct panfrost_bo *)entry->key);
+}
+
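+/* Release the pool's BO references via delete_bo_entry() and destroy the
+ * backing hash table.
+ */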
+void
+panfrost_pool_cleanup(struct pan_pool *pool)
+{
+ _mesa_hash_table_destroy(pool->bos, delete_bo_entry);
+}
+
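+/* Copy the GEM handle of every BO in the pool into 'handles'; the caller
+ * sizes the array using panfrost_pool_num_bos(). Each BO is also flagged
+ * as having a pending read/write GPU access for the BO wait logic.
+ */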
+void
+panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles)
+{
+ unsigned idx = 0;
+ hash_table_foreach(pool->bos, entry) {
+ struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
+
+ assert(bo->gem_handle > 0);
+ handles[idx++] = bo->gem_handle;
+
+ /* Update the BO access flags so that panfrost_bo_wait() knows
+ * about all pending accesses.
+ * We only keep the READ/WRITE info since this is all the BO
+ * wait logic cares about.
+ * We also preserve existing flags as this batch might not
+ * be the first one to access the BO.
+ */
+ bo->gpu_access |= PAN_BO_ACCESS_RW;
+ }
+}
+
struct panfrost_transfer
panfrost_pool_alloc_aligned(struct pan_pool *pool, size_t sz, unsigned alignment)
{
struct panfrost_device *dev, unsigned create_flags,
bool prealloc);
+void
+panfrost_pool_cleanup(struct pan_pool *pool);
+
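+/* Number of BOs currently backing the pool, used to size the handle array
+ * passed to panfrost_pool_get_bo_handles(). */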
+static inline unsigned
+panfrost_pool_num_bos(struct pan_pool *pool)
+{
+ return pool->bos->entries;
+}
+
+void
+panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles);
+
/* Represents a fat pointer for GPU-mapped memory, returned from the transient
* allocator and not used for much else */