struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- unsigned shift = panfrost_get_stack_shift(batch->stack_size);
struct mali_shared_memory shared = {
- .stack_shift = shift,
- .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
.shared_workgroup_count = ~0,
};
- postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
+
+ if (batch->stack_size) {
+ struct panfrost_bo *stack =
+ panfrost_batch_get_scratchpad(batch, batch->stack_size,
+ dev->thread_tls_alloc,
+ dev->core_count);
+
+ shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
+ shared.scratchpad = stack->gpu;
+ }
+
+ postfix->shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
}
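For reference, the aligned upload helper introduced here can be read as an alloc-plus-memcpy wrapper over panfrost_pool_alloc_aligned, the same pattern this patch open-codes for index buffers below. A minimal sketch, assuming the signature implied by the call sites:

    static inline mali_ptr
    panfrost_pool_upload_aligned(struct pan_pool *pool, const void *data,
                                 size_t sz, unsigned alignment)
    {
            /* Grab transient GPU-visible memory at the requested alignment */
            struct panfrost_transfer T =
                    panfrost_pool_alloc_aligned(pool, sz, alignment);

            /* Copy the CPU-built descriptor in and return its GPU address */
            memcpy(T.cpu, data, sz);
            return T.gpu;
    }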
static void
} else {
/* Otherwise, we need to upload to transient memory */
const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
- out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
- info->count *
- info->index_size);
+ struct panfrost_transfer T =
+ panfrost_pool_alloc_aligned(&batch->pool,
+ info->count * info->index_size,
+ info->index_size);
+
+ memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
+ out = T.gpu;
}
if (needs_indices) {
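Throughout these hunks, panfrost_transfer pairs the CPU and GPU views of a single transient allocation; roughly (a sketch of the struct as used above):

    struct panfrost_transfer {
            uint8_t *cpu;  /* CPU mapping, written with memcpy/pan_pack */
            mali_ptr gpu;  /* GPU address, referenced from descriptors */
    };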
static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- void *rts)
+ void *rts,
+ struct panfrost_blend_final *blend)
{
struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
const struct panfrost_device *dev = pan_device(ctx->base.screen);
ctx->blend->base.alpha_to_coverage);
/* Get blending setup */
- unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
-
- struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
- unsigned shader_offset = 0;
- struct panfrost_bo *shader_bo = NULL;
-
- for (unsigned c = 0; c < rt_count; ++c)
- blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
- &shader_offset);
+ unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
/* Disable shader execution if we can */
if (dev->quirks & MIDGARD_SHADERLESS
fragmeta->blend.shader = 0;
- for (signed rt = (rt_count - 1); rt >= 0; --rt) {
+ for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
if (!blend[rt].is_shader)
continue;
/* Additional blend descriptor tacked on for jobs using MFBD */
+ struct bifrost_blend_rt *brts = rts;
+ struct midgard_blend_rt *mrts = rts;
+
+ /* Disable blending for depth-only on Bifrost */
+
+ if (rt_count == 0 && dev->quirks & IS_BIFROST)
+ brts[0].unk2 = 0x3;
+
for (unsigned i = 0; i < rt_count; ++i) {
unsigned flags = 0;
- if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
+ if (!blend[i].no_colour) {
flags = 0x200;
batch->draws |= (PIPE_CLEAR_COLOR0 << i);
- bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
- (ctx->pipe_framebuffer.cbufs[i]) &&
- util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+ bool is_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
}
if (dev->quirks & IS_BIFROST) {
- struct bifrost_blend_rt *brts = rts;
-
brts[i].flags = flags;
if (blend[i].is_shader) {
(fs->bo->gpu & (0xffffffffull << 32)));
brts[i].shader = blend[i].shader.gpu;
brts[i].unk2 = 0x0;
- } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
+ } else {
enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
const struct util_format_description *format_desc;
format_desc = util_format_description(format);
* blending. */
brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
- brts[i].shader_type = fs->blend_types[i];
- } else {
- /* Dummy attachment for depth-only */
- brts[i].unk2 = 0x3;
brts[i].shader_type = fs->blend_types[i];
}
} else {
- struct midgard_blend_rt *mrts = rts;
mrts[i].flags = flags;
if (blend[i].is_shader) {
static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- void *rts)
+ void *rts,
+ struct panfrost_blend_final *blend)
{
const struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_state *fs;
panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
panfrost_frag_meta_zsa_update(ctx, fragmeta);
- panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
+ panfrost_frag_meta_blend_update(ctx, fragmeta, rts, blend);
}
void
if (rt_size)
rts = rzalloc_size(ctx, rt_size * rt_count);
- panfrost_frag_shader_meta_init(ctx, &meta, rts);
+ struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
+
+ for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
+ blend[c] = panfrost_get_blend_for_context(ctx, c);
+
+ panfrost_frag_shader_meta_init(ctx, &meta, rts, blend);

- xfer = panfrost_pool_alloc(&batch->pool, desc_size);
+ xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
memcpy(xfer.cpu, &meta, sizeof(meta));
memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
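The two memcpys above populate one transient block: the shader meta first, then the per-RT blend descriptors. As a comment-form sketch (desc_size assumed to be sizeof(meta) + rt_size * rt_count):

    /* xfer.gpu + 0                          -> struct mali_shader_meta
     * xfer.gpu + sizeof(meta) + i * rt_size -> blend descriptor for RT i
     * Aligning the block to sizeof(meta) keeps the meta naturally aligned.
     */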
* PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
return rsrc->bo->gpu + cb->buffer_offset;
} else if (cb->user_buffer) {
- return panfrost_pool_upload(&batch->pool,
+ return panfrost_pool_upload_aligned(&batch->pool,
cb->user_buffer +
cb->buffer_offset,
- cb->buffer_size);
+ cb->buffer_size, 16);
} else {
unreachable("No constant buffer");
}
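The 16-byte alignment for user constant buffers matches vec4 granularity; presumably it also lines up with what the driver advertises for the cap named in the comment above, along the lines of:

    /* Hypothetical pan_screen.c excerpt, shown only to connect the
     * alignment above to the Gallium cap; not copied from the source. */
    case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
            return 16;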
size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
size_t size = sys_size + uniform_size;
- struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
- size);
+ struct panfrost_transfer transfer =
+ panfrost_pool_alloc_aligned(&batch->pool, size, 16);
/* Upload sysvals requested by the shader */
panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
assert(ubo_count >= 1);
size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
- struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
+ struct panfrost_transfer ubos =
+ panfrost_pool_alloc_aligned(&batch->pool, sz,
+ MALI_UNIFORM_BUFFER_LENGTH);
+
uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
/* Upload uniforms as a UBO */
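Each 64-bit slot in ubo_ptr is then packed in place; a sketch of the per-UBO packing, assuming the auto-generated UNIFORM_BUFFER fields (an entry count in 16-byte units plus a pointer):

    /* Sketch: describe one UBO of `size` bytes at `address` */
    pan_pack(ubo_ptr + i, UNIFORM_BUFFER, cfg) {
            cfg.entries = DIV_ROUND_UP(size, 16);
            cfg.pointer = address;
    }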
struct midgard_payload_vertex_tiler *vtp)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
struct panfrost_shader_state *ss = &all->variants[all->active_variant];
unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
128));
- unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
- info->grid[2] * 4;
+
+ unsigned log2_instances =
+ util_logbase2_ceil(info->grid[0]) +
+ util_logbase2_ceil(info->grid[1]) +
+ util_logbase2_ceil(info->grid[2]);
+
+ unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
shared_size,
1);
struct mali_shared_memory shared = {
.shared_memory = bo->gpu,
- .shared_workgroup_count =
- util_logbase2_ceil(info->grid[0]) +
- util_logbase2_ceil(info->grid[1]) +
- util_logbase2_ceil(info->grid[2]),
- .shared_unk1 = 0x2,
- .shared_shift = util_logbase2(single_size) - 1
+ .shared_workgroup_count = log2_instances,
+ .shared_shift = util_logbase2(single_size) + 1
};
- vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
- sizeof(shared));
+ vtp->postfix.shared_memory = panfrost_pool_upload_aligned(&batch->pool, &shared,
+ sizeof(shared), 64);
}
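Worked example for the sizing above, assuming single_size = 512, a (3, 5, 1) workgroup grid, and dev->core_count = 2:

    /* log2_instances = ceil(log2 3) + ceil(log2 5) + ceil(log2 1)
     *                = 2 + 3 + 0 = 5
     * shared_size    = 512 * (1 << 5) * 2 = 32768 bytes
     * shared_shift   = log2(512) + 1 = 10
     */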
static mali_ptr
return;
if (device->quirks & IS_BIFROST) {
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
MALI_BIFROST_TEXTURE_LENGTH *
- ctx->sampler_view_count[stage]);
+ ctx->sampler_view_count[stage],
+ MALI_BIFROST_TEXTURE_LENGTH);
struct mali_bifrost_texture_packed *out =
(struct mali_bifrost_texture_packed *) T.cpu;
trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
}
- postfix->textures = panfrost_pool_upload(&batch->pool,
+ postfix->textures = panfrost_pool_upload_aligned(&batch->pool,
trampolines,
sizeof(uint64_t) *
- ctx->sampler_view_count[stage]);
+ ctx->sampler_view_count[stage],
+ sizeof(uint64_t));
}
}
assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
size_t sz = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
+ struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
unsigned instance_shift = vertex_postfix->instance_shift;
unsigned instance_odd = vertex_postfix->instance_odd;
- /* Worst case: everything is NPOT */
+ /* Worst case: everything is NPOT, which is only possible if instancing
+ * is enabled. Otherwise a single record is guaranteed */
+ bool could_npot = instance_shift || instance_odd;
- struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
- MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
+ struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
+ (could_npot ? 2 : 1),
+ MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
- MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
+ MALI_ATTRIBUTE_LENGTH);
struct mali_attribute_buffer_packed *bufs =
(struct mali_attribute_buffer_packed *) S.cpu;
/* Add special gl_VertexID/gl_InstanceID buffers */
- panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+ if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
+ panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
- pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
- cfg.buffer_index = k++;
- cfg.format = so->formats[PAN_VERTEX_ID];
- }
+ pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_VERTEX_ID];
+ }
- panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+ panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
- pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
- cfg.buffer_index = k++;
- cfg.format = so->formats[PAN_INSTANCE_ID];
+ pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_INSTANCE_ID];
+ }
}
/* Attribute addresses require 64-byte alignment, so let:
unsigned stride, unsigned count)
{
unsigned size = stride * count;
- mali_ptr ptr = panfrost_pool_alloc(&batch->pool, size).gpu;
+ mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
cfg.stride = stride;
vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
- struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
- vs_size +
- fs_size);
+ struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
+ &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
struct pipe_stream_output_info *so = &vs->stream_output;
unsigned present = pan_varying_present(vs, fs, dev->quirks);
}
unsigned xfb_base = pan_xfb_base(present);
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
- MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets));
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
+ MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
struct mali_attribute_buffer_packed *varyings =
(struct mali_attribute_buffer_packed *) T.cpu;
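Similar arithmetic applies to the transform-feedback case (counts assumed); the pair-sized (2x length) alignment, here as above, appears to keep a record and its NPOT continuation from straddling a descriptor-pair boundary:

    /* Assumed example: xfb_base = 4, ctx->streamout.num_targets = 2
     *   T = (4 + 2) * MALI_ATTRIBUTE_BUFFER_LENGTH bytes,
     *       starting on a MALI_ATTRIBUTE_BUFFER_LENGTH * 2 boundary
     */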
0, 0,
};
- return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
+ return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
}