From: Alyssa Rosenzweig
Date: Fri, 14 Aug 2020 23:08:57 +0000 (-0400)
Subject: panfrost: Pre-allocate memory for pool
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=40c0d7a13df02ee15e2e0e14ed4ab53c7e866a53;p=mesa.git

panfrost: Pre-allocate memory for pool

Seeing random BO creates in the middle of command stream generation is
annoying, let's preallocate memory so CPU profiling is less random.

Signed-off-by: Alyssa Rosenzweig
Reviewed-by: Tomeu Vizoso
Part-of:
---

diff --git a/src/gallium/drivers/panfrost/pan_job.c b/src/gallium/drivers/panfrost/pan_job.c
index b0665da6b6e..50616eb9017 100644
--- a/src/gallium/drivers/panfrost/pan_job.c
+++ b/src/gallium/drivers/panfrost/pan_job.c
@@ -112,7 +112,7 @@ panfrost_create_batch(struct panfrost_context *ctx,
         batch->out_sync = panfrost_create_batch_fence(batch);
         util_copy_framebuffer_state(&batch->key, key);
 
-        batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen));
+        batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen), 0, true);
 
         panfrost_batch_add_fbo_bos(batch);
 
diff --git a/src/panfrost/lib/pan_pool.c b/src/panfrost/lib/pan_pool.c
index 1a08be2aacf..94847565f6b 100644
--- a/src/panfrost/lib/pan_pool.c
+++ b/src/panfrost/lib/pan_pool.c
@@ -34,11 +34,38 @@
  * into whereever we left off. If there isn't space, we allocate a new entry
  * into the pool and copy there */
 
+static struct panfrost_bo *
+panfrost_pool_alloc_backing(struct pan_pool *pool, size_t bo_sz)
+{
+        /* We don't know what the BO will be used for, so let's flag it
+         * RW and attach it to both the fragment and vertex/tiler jobs.
+         * TODO: if we want fine grained BO assignment we should pass
+         * flags to this function and keep the read/write,
+         * fragment/vertex+tiler pools separate.
+         */
+        struct panfrost_bo *bo = panfrost_bo_create(pool->dev, bo_sz,
+                        pool->create_flags);
+
+        uintptr_t flags = PAN_BO_ACCESS_PRIVATE |
+                PAN_BO_ACCESS_RW |
+                PAN_BO_ACCESS_VERTEX_TILER |
+                PAN_BO_ACCESS_FRAGMENT;
+
+        _mesa_hash_table_insert(pool->bos, bo, (void *) flags);
+
+        pool->transient_bo = bo;
+        pool->transient_offset = 0;
+
+        return bo;
+}
+
 struct pan_pool
-panfrost_create_pool(void *memctx, struct panfrost_device *dev)
+panfrost_create_pool(void *memctx, struct panfrost_device *dev,
+                unsigned create_flags, bool prealloc)
 {
         struct pan_pool pool = {
                 .dev = dev,
+                .create_flags = create_flags,
                 .transient_offset = 0,
                 .transient_bo = NULL
         };
@@ -46,6 +73,8 @@ panfrost_create_pool(void *memctx, struct panfrost_device *dev)
         pool.bos = _mesa_hash_table_create(memctx, _mesa_hash_pointer,
                         _mesa_key_pointer_equal);
 
+        if (prealloc)
+                panfrost_pool_alloc_backing(&pool, TRANSIENT_SLAB_SIZE);
 
         return pool;
 }
@@ -57,45 +86,18 @@ panfrost_pool_alloc(struct pan_pool *pool, size_t sz)
         sz = ALIGN_POT(sz, ALIGNMENT);
 
         /* Find or create a suitable BO */
-        struct panfrost_bo *bo = NULL;
-
-        unsigned offset = 0;
-
-        bool fits_in_current = (pool->transient_offset + sz) < TRANSIENT_SLAB_SIZE;
-
-        if (likely(pool->transient_bo && fits_in_current)) {
-                /* We can reuse the current BO, so get it */
-                bo = pool->transient_bo;
-
-                /* Use the specified offset */
-                offset = pool->transient_offset;
-                pool->transient_offset = offset + sz;
-        } else {
-                size_t bo_sz = sz < TRANSIENT_SLAB_SIZE ?
-                               TRANSIENT_SLAB_SIZE : ALIGN_POT(sz, 4096);
-
-                /* We can't reuse the current BO, but we can create a new one.
-                 * We don't know what the BO will be used for, so let's flag it
-                 * RW and attach it to both the fragment and vertex/tiler jobs.
-                 * TODO: if we want fine grained BO assignment we should pass
-                 * flags to this function and keep the read/write,
-                 * fragment/vertex+tiler pools separate.
-                 */
-                bo = panfrost_bo_create(pool->dev, bo_sz, 0);
-
-                uintptr_t flags = PAN_BO_ACCESS_PRIVATE |
-                        PAN_BO_ACCESS_RW |
-                        PAN_BO_ACCESS_VERTEX_TILER |
-                        PAN_BO_ACCESS_FRAGMENT;
-
-                _mesa_hash_table_insert(pool->bos, bo, (void *) flags);
-
-                if (sz < TRANSIENT_SLAB_SIZE) {
-                        pool->transient_bo = bo;
-                        pool->transient_offset = offset + sz;
-                }
+        struct panfrost_bo *bo = pool->transient_bo;
+        unsigned offset = pool->transient_offset;
+
+        /* If we don't fit, allocate a new backing */
+        if (unlikely(bo == NULL || (offset + sz) >= TRANSIENT_SLAB_SIZE)) {
+                bo = panfrost_pool_alloc_backing(pool,
+                                ALIGN_POT(MAX2(TRANSIENT_SLAB_SIZE, sz), 4096));
+                offset = 0;
         }
 
+        pool->transient_offset += sz;
+
         struct panfrost_transfer ret = {
                 .cpu = bo->cpu + offset,
                 .gpu = bo->gpu + offset,
diff --git a/src/panfrost/lib/pan_pool.h b/src/panfrost/lib/pan_pool.h
index c4668af57fe..22fddb8870c 100644
--- a/src/panfrost/lib/pan_pool.h
+++ b/src/panfrost/lib/pan_pool.h
@@ -45,10 +45,13 @@ struct pan_pool {
 
         /* Within the topmost transient BO, how much has been used? */
         unsigned transient_offset;
+
+        /* BO flags to use in the pool */
+        unsigned create_flags;
 };
 
 struct pan_pool
-panfrost_create_pool(void *memctx, struct panfrost_device *dev);
+panfrost_create_pool(void *memctx, struct panfrost_device *dev, unsigned create_flags, bool prealloc);
 
 /* Represents a fat pointer for GPU-mapped memory, returned from the transient
  * allocator and not used for much else */
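For readers unfamiliar with the pool, it is a bump allocator over GPU buffer
objects (BOs): allocations are carved out of the current "transient" BO, and a
new backing BO is created only when a request does not fit. The sketch below
models that behaviour and the effect of the new prealloc flag in plain C. It
is a simplified illustration, not the driver code: the malloc'd slab, the
SLAB_SIZE constant and the pool_* names stand in for panfrost_bo,
TRANSIENT_SLAB_SIZE and the panfrost_pool_* functions, and the BO access
flags and the per-pool BO hash table are omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define SLAB_SIZE (64 * 1024)   /* stand-in for TRANSIENT_SLAB_SIZE */
#define ALIGN_POT(x, pot) (((x) + (pot) - 1) & ~((size_t)(pot) - 1))
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

struct pool {
        uint8_t *slab;      /* current backing allocation ("transient BO") */
        size_t slab_size;
        size_t offset;      /* bump pointer within the current slab */
};

/* Replace the backing allocation; mirrors panfrost_pool_alloc_backing().
 * A real pool creates a GPU BO here and records it in a hash table so it
 * can be attached to the batch's jobs and freed later. */
static uint8_t *
pool_alloc_backing(struct pool *p, size_t sz)
{
        p->slab = malloc(sz);
        p->slab_size = sz;
        p->offset = 0;
        return p->slab;
}

/* With prealloc, the first backing allocation happens at creation time
 * rather than inside the first pool_alloc() call. */
static struct pool
pool_create(bool prealloc)
{
        struct pool p = { 0 };
        if (prealloc)
                pool_alloc_backing(&p, SLAB_SIZE);
        return p;
}

static void *
pool_alloc(struct pool *p, size_t sz)
{
        sz = ALIGN_POT(sz, 64);

        /* Reuse the current slab when the request fits; otherwise grab a
         * new backing, sized up for oversized requests. */
        if (p->slab == NULL || p->offset + sz >= p->slab_size)
                pool_alloc_backing(p, ALIGN_POT(MAX2((size_t)SLAB_SIZE, sz), 4096));

        void *ptr = p->slab + p->offset;
        p->offset += sz;
        return ptr;
}

Creating the pool with prealloc = true, as panfrost_create_batch now does,
moves the first backing allocation to pool creation instead of the middle of
the first allocation, which is what keeps BO creation out of command-stream
generation profiles.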