}
struct panfrost_transfer
-panfrost_allocate_transient(struct panfrost_batch *batch, size_t sz)
+panfrost_pool_alloc(struct pan_pool *pool, size_t sz)
{
/* Pad the size */
sz = ALIGN_POT(sz, ALIGNMENT);
unsigned offset = 0;
- bool fits_in_current = (batch->pool.transient_offset + sz) < TRANSIENT_SLAB_SIZE;
+ bool fits_in_current = (pool->transient_offset + sz) < TRANSIENT_SLAB_SIZE;
- if (likely(batch->pool.transient_bo && fits_in_current)) {
+ if (likely(pool->transient_bo && fits_in_current)) {
/* We can reuse the current BO, so get it */
- bo = batch->pool.transient_bo;
+ bo = pool->transient_bo;
/* Use the specified offset */
- offset = batch->pool.transient_offset;
- batch->pool.transient_offset = offset + sz;
+ offset = pool->transient_offset;
+ pool->transient_offset = offset + sz;
} else {
size_t bo_sz = sz < TRANSIENT_SLAB_SIZE ?
TRANSIENT_SLAB_SIZE : ALIGN_POT(sz, 4096);
* flags to this function and keep the read/write,
* fragment/vertex+tiler pools separate.
*/
- bo = pan_bo_create(batch->pool.dev, bo_sz, 0);
+ bo = pan_bo_create(pool->dev, bo_sz, 0);
uintptr_t flags = PAN_BO_ACCESS_PRIVATE |
PAN_BO_ACCESS_RW |
PAN_BO_ACCESS_VERTEX_TILER |
PAN_BO_ACCESS_FRAGMENT;
- _mesa_hash_table_insert(batch->pool.bos, bo, (void *) flags);
+ _mesa_hash_table_insert(pool->bos, bo, (void *) flags);
if (sz < TRANSIENT_SLAB_SIZE) {
- batch->pool.transient_bo = bo;
- batch->pool.transient_offset = offset + sz;
+ pool->transient_bo = bo;
+ pool->transient_offset = offset + sz;
}
}
}
+/* Allocates `sz` bytes of transient (per-frame scratch) memory from `pool`
+ * via panfrost_pool_alloc, copies `data` into the CPU mapping, and returns
+ * the GPU virtual address of the copy. The allocation's lifetime is tied to
+ * the pool; callers must not free the returned pointer themselves. */
mali_ptr
-panfrost_upload_transient(struct panfrost_batch *batch, const void *data,
- size_t sz)
+panfrost_pool_upload(struct pan_pool *pool, const void *data, size_t sz)
{
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sz);
+ struct panfrost_transfer transfer = panfrost_pool_alloc(pool, sz);
memcpy(transfer.cpu, data, sz);
return transfer.gpu;
}
.scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
.shared_workgroup_count = ~0,
};
- postfix->shared_memory = panfrost_upload_transient(batch, &shared, sizeof(shared));
+ postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
}
static void
sizeof(struct mali_single_framebuffer) :
sizeof(struct mali_framebuffer);
- batch->framebuffer = panfrost_allocate_transient(batch, size);
+ batch->framebuffer = panfrost_pool_alloc(&batch->pool, size);
/* Tag the pointer */
if (!(dev->quirks & MIDGARD_SFBD))
} else {
/* Otherwise, we need to upload to transient memory */
const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
- out = panfrost_upload_transient(batch, ibuf8 + offset,
+ out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
info->count *
info->index_size);
}
panfrost_frag_shader_meta_init(ctx, &meta, rts);
- xfer = panfrost_allocate_transient(batch, desc_size);
+ xfer = panfrost_pool_alloc(&batch->pool, desc_size);
memcpy(xfer.cpu, &meta, sizeof(meta));
memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
shader_ptr = xfer.gpu;
} else {
- shader_ptr = panfrost_upload_transient(batch, &meta,
+ shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
sizeof(meta));
}
mvp.viewport1[0] + 1,
mvp.viewport1[1] + 1);
- tiler_postfix->viewport = panfrost_upload_transient(batch, &mvp,
+ tiler_postfix->viewport = panfrost_pool_upload(&batch->pool, &mvp,
sizeof(mvp));
}
* PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
return rsrc->bo->gpu + cb->buffer_offset;
} else if (cb->user_buffer) {
- return panfrost_upload_transient(batch,
+ return panfrost_pool_upload(&batch->pool,
cb->user_buffer +
cb->buffer_offset,
cb->buffer_size);
size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
size_t size = sys_size + uniform_size;
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
size);
/* Upload sysvals requested by the shader */
ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
}
- mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
+ mali_ptr ubufs = panfrost_pool_upload(&batch->pool, ubos, sz);
postfix->uniforms = transfer.gpu;
postfix->uniform_buffers = ubufs;
.shared_shift = util_logbase2(single_size) - 1
};
- vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
+ vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
sizeof(shared));
}
memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
}
- postfix->textures = panfrost_upload_transient(batch,
+ postfix->textures = panfrost_pool_upload(&batch->pool,
descriptors,
sizeof(struct bifrost_texture_descriptor) *
ctx->sampler_view_count[stage]);
trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
}
- postfix->textures = panfrost_upload_transient(batch,
+ postfix->textures = panfrost_pool_upload(&batch->pool,
trampolines,
sizeof(uint64_t) *
ctx->sampler_view_count[stage]);
if (device->quirks & IS_BIFROST) {
size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
size_t transfer_size = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
transfer_size);
struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
} else {
size_t desc_size = sizeof(struct mali_sampler_descriptor);
size_t transfer_size = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
transfer_size);
struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
struct panfrost_vertex_state *so = ctx->vertex;
panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
- vertex_postfix->attribute_meta = panfrost_upload_transient(batch, so->hw,
+ vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
sizeof(*so->hw) *
PAN_MAX_ATTRIBUTE);
}
/* Upload whatever we emitted and go */
- vertex_postfix->attributes = panfrost_upload_transient(batch, attrs,
+ vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
k * sizeof(*attrs));
}
slot->size = stride * count;
slot->shift = slot->extra_flags = 0;
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
slot->size);
slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
- struct panfrost_transfer trans = panfrost_allocate_transient(batch,
+ struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
vs_size +
fs_size);
}
unsigned xfb_base = pan_xfb_base(present);
- struct panfrost_transfer T = panfrost_allocate_transient(batch,
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
union mali_attr *varyings = (union mali_attr *) T.cpu;
0, 0,
};
- return panfrost_upload_transient(batch, locations, 96 * sizeof(uint16_t));
+ return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
}