PAN_BO_ACCESS_VERTEX_TILER;
}
-static void
-panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
- struct mali_vertex_tiler_postfix *postfix)
+static mali_ptr
+panfrost_vt_emit_shared_memory(struct panfrost_batch *batch)
{
- struct panfrost_device *dev = pan_device(ctx->base.screen);
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
- unsigned shift = panfrost_get_stack_shift(batch->stack_size);
struct mali_shared_memory shared = {
- .stack_shift = shift,
- .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
.shared_workgroup_count = ~0,
};
- postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
-}
-static void
-panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
- struct mali_vertex_tiler_postfix *postfix)
-{
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
+ if (batch->stack_size) {
+ struct panfrost_bo *stack =
+ panfrost_batch_get_scratchpad(batch, batch->stack_size,
+ dev->thread_tls_alloc,
+ dev->core_count);
+
+ shared.stack_shift = panfrost_get_stack_shift(batch->stack_size);
+ shared.scratchpad = stack->gpu;
+ }
+
+ return panfrost_pool_upload_aligned(&batch->pool, &shared, sizeof(shared), 64);
}
static void
-panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
+panfrost_vt_update_rasterizer(struct panfrost_rasterizer *rasterizer,
struct mali_vertex_tiler_prefix *prefix,
struct mali_vertex_tiler_postfix *postfix)
{
- struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
-
postfix->gl_enables |= 0x7;
SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
- rasterizer && rasterizer->base.front_ccw);
+ rasterizer->base.front_ccw);
SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
- rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
+ (rasterizer->base.cull_face & PIPE_FACE_FRONT));
SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
- rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
+ (rasterizer->base.cull_face & PIPE_FACE_BACK));
SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
- rasterizer && rasterizer->base.flatshade_first);
+ rasterizer->base.flatshade_first);
}
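
For reference, SET_BIT is the local helper used throughout this file; a minimal sketch, assuming the conventional definition (set or clear a flag bit based on a condition):

/* Hedged sketch of the SET_BIT helper assumed by the hunks above */
#define SET_BIT(lval, bit, cond) \
        if (cond)                \
                (lval) |= (bit); \
        else                     \
                (lval) &= ~(bit);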
void
struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
if (!panfrost_writes_point_size(ctx)) {
- bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
- float val = 0.0f;
-
- if (rasterizer)
- val = points ?
+ float val = (prefix->draw_mode == MALI_DRAW_MODE_POINTS) ?
rasterizer->base.point_size :
rasterizer->base.line_width;
struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_device *device = pan_device(ctx->base.screen);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
if (!ctx->shader[stage])
return;
if (device->quirks & IS_BIFROST) {
postfix->gl_enables = 0x2;
- panfrost_vt_emit_shared_memory(ctx, postfix);
+ postfix->shared_memory = panfrost_vt_emit_shared_memory(batch);
} else {
postfix->gl_enables = 0x6;
- panfrost_vt_attach_framebuffer(ctx, postfix);
+ postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
}
if (stage == PIPE_SHADER_FRAGMENT) {
panfrost_vt_update_occlusion_query(ctx, postfix);
- panfrost_vt_update_rasterizer(ctx, prefix, postfix);
+ panfrost_vt_update_rasterizer(ctx->rasterizer, prefix, postfix);
}
}
} else {
/* Otherwise, we need to upload to transient memory */
const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
- out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
- info->count *
- info->index_size);
+ struct panfrost_transfer T =
+ panfrost_pool_alloc_aligned(&batch->pool,
+ info->count * info->index_size,
+ info->index_size);
+
+ memcpy(T.cpu, ibuf8 + offset, info->count * info->index_size);
+ out = T.gpu;
}
if (needs_indices) {
}
}
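
The alloc + memcpy pattern above is the open-coded form of a pool upload with a caller-chosen alignment (here index_size, so indices never straddle their natural boundary). A minimal sketch of the equivalent helper, assuming the pool type name and the alloc entry point shown elsewhere in this patch:

static inline mali_ptr
pool_upload_sketch(struct pan_pool *pool, /* pool type name assumed */
                   const void *data, size_t sz, unsigned alignment)
{
        struct panfrost_transfer t =
                panfrost_pool_alloc_aligned(pool, sz, alignment);

        memcpy(t.cpu, data, sz);
        return t.gpu;
}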
-static void
-panfrost_shader_meta_init(struct panfrost_context *ctx,
- enum pipe_shader_type st,
- struct mali_shader_meta *meta)
-{
- const struct panfrost_device *dev = pan_device(ctx->base.screen);
- struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
-
- memset(meta, 0, sizeof(*meta));
- meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
- meta->attribute_count = ss->attribute_count;
- meta->varying_count = ss->varying_count;
- meta->texture_count = ctx->sampler_view_count[st];
- meta->sampler_count = ctx->sampler_count[st];
-
- if (dev->quirks & IS_BIFROST) {
- if (st == PIPE_SHADER_VERTEX)
- meta->bifrost1.unk1 = 0x800000;
- else {
- /* First clause ATEST |= 0x4000000.
- * Less than 32 regs |= 0x200 */
- meta->bifrost1.unk1 = 0x950020;
- }
-
- meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
- if (st == PIPE_SHADER_VERTEX)
- meta->bifrost2.preload_regs = 0xC0;
- else {
- meta->bifrost2.preload_regs = 0x1;
- SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
- }
-
- meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
- ss->uniform_cutoff);
- } else {
- meta->midgard1.uniform_count = MIN2(ss->uniform_count,
- ss->uniform_cutoff);
- meta->midgard1.work_count = ss->work_reg_count;
-
- /* TODO: This is not conformant on ES3 */
- meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
-
- meta->midgard1.flags_lo = 0x20;
- meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
-
- SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
- }
-}
-
static unsigned
translate_tex_wrap(enum pipe_tex_wrap w)
{
}
}
-static void
-panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
- struct mali_shader_meta *fragmeta)
-{
- if (!ctx->rasterizer) {
- SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
- SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
- fragmeta->depth_units = 0.0f;
- fragmeta->depth_factor = 0.0f;
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
- return;
- }
-
- struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
-
- bool msaa = rast->multisample;
-
- /* TODO: Sample size */
- SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
- SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
-
- struct panfrost_shader_state *fs;
- fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
-
- /* EXT_shader_framebuffer_fetch requires the shader to be run
- * per-sample when outputs are read. */
- bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
- SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
-
- fragmeta->depth_units = rast->offset_units * 2.0f;
- fragmeta->depth_factor = rast->offset_scale;
-
- /* XXX: Which bit is which? Does this maybe allow offseting not-tri? */
-
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
-
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
-}
-
-static void
-panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
- struct mali_shader_meta *fragmeta)
-{
- const struct panfrost_zsa_state *so = ctx->depth_stencil;
- int zfunc = PIPE_FUNC_ALWAYS;
-
- if (!so) {
- /* If stenciling is disabled, the state is irrelevant */
- SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
- } else {
- SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
- so->base.stencil[0].enabled);
-
- fragmeta->stencil_mask_front = so->stencil_mask_front;
- fragmeta->stencil_mask_back = so->stencil_mask_back;
-
- /* Bottom bits for stencil ref, exactly one word */
- fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
-
- /* If back-stencil is not enabled, use the front values */
-
- if (so->base.stencil[1].enabled)
- fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
- else
- fragmeta->stencil_back = fragmeta->stencil_front;
-
- if (so->base.depth.enabled)
- zfunc = so->base.depth.func;
-
- /* Depth state (TODO: Refactor) */
-
- SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
- so->base.depth.writemask);
- }
-
- fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
- fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
-}
-
static bool
panfrost_fs_required(
struct panfrost_shader_state *fs,
}
static void
-panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
- struct mali_shader_meta *fragmeta,
- void *rts)
+panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
+ struct panfrost_blend_final *blend)
{
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- const struct panfrost_device *dev = pan_device(ctx->base.screen);
- struct panfrost_shader_state *fs;
- fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+ const struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
+ struct panfrost_shader_state *fs = panfrost_get_shader_state(batch->ctx, PIPE_SHADER_FRAGMENT);
+ unsigned rt_count = batch->key.nr_cbufs;
- SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
- (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
- !ctx->blend->base.dither);
+ struct bifrost_blend_rt *brts = rts;
- SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
- ctx->blend->base.alpha_to_coverage);
+ /* Disable blending for depth-only */
- /* Get blending setup */
- unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
-
- struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
- unsigned shader_offset = 0;
- struct panfrost_bo *shader_bo = NULL;
-
- for (unsigned c = 0; c < rt_count; ++c)
- blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
- &shader_offset);
-
- /* Disable shader execution if we can */
- if (dev->quirks & MIDGARD_SHADERLESS
- && !panfrost_fs_required(fs, blend, rt_count)) {
- fragmeta->shader = 0;
- fragmeta->attribute_count = 0;
- fragmeta->varying_count = 0;
- fragmeta->texture_count = 0;
- fragmeta->sampler_count = 0;
-
- /* This feature is not known to work on Bifrost */
- fragmeta->midgard1.work_count = 1;
- fragmeta->midgard1.uniform_count = 0;
- fragmeta->midgard1.uniform_buffer_count = 0;
- }
-
- /* If there is a blend shader, work registers are shared. We impose 8
- * work registers as a limit for blend shaders. Should be lower XXX */
-
- if (!(dev->quirks & IS_BIFROST)) {
- for (unsigned c = 0; c < rt_count; ++c) {
- if (blend[c].is_shader) {
- fragmeta->midgard1.work_count =
- MAX2(fragmeta->midgard1.work_count, 8);
+ if (rt_count == 0) {
+ if (dev->quirks & IS_BIFROST) {
+ memset(brts, 0, sizeof(*brts));
+ brts[0].unk2 = 0x3;
+ } else {
+ pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
+ cfg.equation = 0xf0122122; /* Replace */
}
}
}
- /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
- * copied to the blend_meta appended (by convention), but this is the
- * field actually read by the hardware. (Or maybe both are read...?).
- * Specify the last RTi with a blend shader. */
-
- fragmeta->blend.shader = 0;
-
- for (signed rt = (rt_count - 1); rt >= 0; --rt) {
- if (!blend[rt].is_shader)
- continue;
-
- fragmeta->blend.shader = blend[rt].shader.gpu |
- blend[rt].shader.first_tag;
- break;
- }
-
- if (dev->quirks & MIDGARD_SFBD) {
- /* When only a single render target platform is used, the blend
- * information is inside the shader meta itself. We additionally
- * need to signal CAN_DISCARD for nontrivial blend modes (so
- * we're able to read back the destination buffer) */
-
- SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
- blend[0].is_shader);
-
- if (!blend[0].is_shader) {
- fragmeta->blend.equation = *blend[0].equation.equation;
- fragmeta->blend.constant = blend[0].equation.constant;
- }
-
- SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
- !blend[0].no_blending || fs->can_discard);
-
- batch->draws |= PIPE_CLEAR_COLOR0;
- return;
- }
-
- if (dev->quirks & IS_BIFROST) {
- bool no_blend = true;
-
- for (unsigned i = 0; i < rt_count; ++i)
- no_blend &= (blend[i].no_blending | blend[i].no_colour);
-
- SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
- !fs->can_discard && !fs->writes_depth && no_blend);
- }
-
- /* Additional blend descriptor tacked on for jobs using MFBD */
-
for (unsigned i = 0; i < rt_count; ++i) {
- unsigned flags = 0;
+ struct mali_blend_flags_packed flags = {};
+
+ pan_pack(&flags, BLEND_FLAGS, cfg) {
+ if (blend[i].no_colour) {
+ cfg.enable = false;
+ break;
+ }
- if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
- flags = 0x200;
batch->draws |= (PIPE_CLEAR_COLOR0 << i);
- bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
- (ctx->pipe_framebuffer.cbufs[i]) &&
- util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+ cfg.srgb = util_format_is_srgb(batch->key.cbufs[i]->format);
+ cfg.load_destination = blend[i].load_dest;
+ cfg.dither_disable = !batch->ctx->blend->base.dither;
- SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
- SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
- SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
- SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
+ if (!(dev->quirks & IS_BIFROST))
+ cfg.midgard_blend_shader = blend[i].is_shader;
}
if (dev->quirks & IS_BIFROST) {
- struct bifrost_blend_rt *brts = rts;
-
- brts[i].flags = flags;
+ memset(brts + i, 0, sizeof(brts[i]));
+ brts[i].flags = flags.opaque[0];
if (blend[i].is_shader) {
/* The blend shader's address needs to be at
(fs->bo->gpu & (0xffffffffull << 32)));
brts[i].shader = blend[i].shader.gpu;
brts[i].unk2 = 0x0;
- } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
- enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
+ } else {
+ enum pipe_format format = batch->key.cbufs[i]->format;
const struct util_format_description *format_desc;
format_desc = util_format_description(format);
- brts[i].equation = *blend[i].equation.equation;
+ brts[i].equation = blend[i].equation.equation;
/* TODO: this is a bit more complicated */
brts[i].constant = blend[i].equation.constant;
* mode (equivalent to rgb_mode = alpha_mode =
* x122, colour mask = 0xF). 0x1a allows
* blending. */
- brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
+ brts[i].unk2 = blend[i].opaque ? 0x19 : 0x1a;
- brts[i].shader_type = fs->blend_types[i];
- } else {
- /* Dummy attachment for depth-only */
- brts[i].unk2 = 0x3;
brts[i].shader_type = fs->blend_types[i];
}
} else {
- struct midgard_blend_rt *mrts = rts;
- mrts[i].flags = flags;
-
- if (blend[i].is_shader) {
- mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
- } else {
- mrts[i].blend.equation = *blend[i].equation.equation;
- mrts[i].blend.constant = blend[i].equation.constant;
+ pan_pack(rts, MIDGARD_BLEND_OPAQUE, cfg) {
+ cfg.flags = flags.opaque[0];
+
+ if (blend[i].is_shader) {
+ cfg.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
+ } else {
+ cfg.equation = blend[i].equation.equation.opaque[0];
+ cfg.constant = blend[i].equation.constant;
+ }
}
+
+ rts += MALI_MIDGARD_BLEND_LENGTH;
}
}
}
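
Every cfg block in this function goes through pan_pack from the generated pack headers. Conceptually (the generated macro differs in mechanics), it default-initializes a spec struct, runs the body, and serializes the struct into the destination on scope exit:

/* Conceptual model of pan_pack(dst, BLEND_FLAGS, cfg) -- not the
 * literal macro expansion: */
{
        struct MALI_BLEND_FLAGS cfg = { /* spec default values */ };
        /* ... body assigns cfg fields ... */
        MALI_BLEND_FLAGS_pack((uint32_t *) dst, &cfg);
}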
static void
-panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
- struct mali_shader_meta *fragmeta,
- void *rts)
+panfrost_emit_frag_shader(struct panfrost_context *ctx,
+ struct mali_state_packed *fragmeta,
+ struct panfrost_blend_final *blend)
{
const struct panfrost_device *dev = pan_device(ctx->base.screen);
- struct panfrost_shader_state *fs;
+ struct panfrost_shader_state *fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+ struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
+ const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
+ unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
+ bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
+
+ /* Built up here */
+ struct mali_shader_packed shader = fs->shader;
+ struct mali_preload_packed preload = fs->preload;
+ uint32_t properties;
+ struct mali_multisample_misc_packed multisample_misc;
+ struct mali_stencil_mask_misc_packed stencil_mask_misc;
+ union midgard_blend sfbd_blend = { 0 };
+
+ if (!panfrost_fs_required(fs, blend, rt_count)) {
+ if (dev->quirks & IS_BIFROST) {
+ pan_pack(&shader, SHADER, cfg) {}
- fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+ pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
+ cfg.unknown = 0x950020; /* XXX */
+ cfg.early_z_enable = true;
+ }
- bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
- fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
+ preload.opaque[0] = 0;
+ } else {
+ pan_pack(&shader, SHADER, cfg) {
+ cfg.shader = 0x1;
+ }
- fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
- fragmeta->unknown2_4 = 0x4e0;
+ pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
+ cfg.work_register_count = 1;
+ cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
+ cfg.early_z_enable = true;
+ }
+ }
+ } else if (dev->quirks & IS_BIFROST) {
+ bool no_blend = true;
- /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
- * is required (independent of 32-bit/64-bit descriptors), or why it's
- * not used on later GPU revisions. Otherwise, all shader jobs fault on
- * these earlier chips (perhaps this is a chicken bit of some kind).
- * More investigation is needed. */
+ for (unsigned i = 0; i < rt_count; ++i)
+ no_blend &= (!blend[i].load_dest | blend[i].no_colour);
- SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
+ pan_pack(&properties, BIFROST_PROPERTIES, cfg) {
+ cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
+ }
- if (dev->quirks & IS_BIFROST) {
- /* TODO */
+ /* Combine with prepacked properties */
+ properties |= fs->properties.opaque[0];
} else {
- /* Depending on whether it's legal to in the given shader, we try to
- * enable early-z testing. TODO: respect e-z force */
+ /* Reasons to disable early-Z from a shader perspective */
+ bool late_z = fs->can_discard || fs->writes_global ||
+ fs->writes_depth || fs->writes_stencil;
+
+ /* If either depth or stencil is enabled, discard matters */
+ bool zs_enabled =
+ (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
+ zsa->base.stencil[0].enabled;
+
+ bool has_blend_shader = false;
+
+ for (unsigned c = 0; c < rt_count; ++c)
+ has_blend_shader |= blend[c].is_shader;
+
+ pan_pack(&properties, MIDGARD_PROPERTIES, cfg) {
+ /* TODO: Reduce this limit? */
+ if (has_blend_shader)
+ cfg.work_register_count = MAX2(fs->work_reg_count, 8);
+ else
+ cfg.work_register_count = fs->work_reg_count;
+
+ cfg.early_z_enable = !(late_z || alpha_to_coverage);
+ cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
+ cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
+ }
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
- !fs->can_discard && !fs->writes_global &&
- !fs->writes_depth && !fs->writes_stencil &&
- !ctx->blend->base.alpha_to_coverage);
+ properties |= fs->properties.opaque[0];
+ }
- /* Add the writes Z/S flags if needed. */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
- SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
+ pan_pack(&multisample_misc, MULTISAMPLE_MISC, cfg) {
+ bool msaa = rast->multisample;
+ cfg.multisample_enable = msaa;
+ cfg.sample_mask = (msaa ? ctx->sample_mask : ~0) & 0xFFFF;
- /* Any time texturing is used, derivatives are implicitly calculated,
- * so we need to enable helper invocations */
+ /* EXT_shader_framebuffer_fetch requires per-sample */
+ bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
+ cfg.evaluate_per_sample = msaa && per_sample;
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
- fs->helper_invocations);
+ if (dev->quirks & MIDGARD_SFBD) {
+ cfg.sfbd_load_destination = blend[0].load_dest;
+ cfg.sfbd_blend_shader = blend[0].is_shader;
+ }
- /* If discard is enabled, which bit we set to convey this
- * depends on if depth/stencil is used for the draw or not.
- * Just one of depth OR stencil is enough to trigger this. */
+ cfg.depth_function = zsa->base.depth.enabled ?
+ panfrost_translate_compare_func(zsa->base.depth.func) :
+ MALI_FUNC_ALWAYS;
- const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
- bool zs_enabled = fs->writes_depth || fs->writes_stencil;
+ cfg.depth_write_mask = zsa->base.depth.writemask;
+ cfg.near_discard = rast->depth_clip_near;
+ cfg.far_discard = rast->depth_clip_far;
+ cfg.unknown_2 = true;
+ }
- if (zsa) {
- zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
- zs_enabled |= zsa->stencil[0].enabled;
+ pan_pack(&stencil_mask_misc, STENCIL_MASK_MISC, cfg) {
+ cfg.stencil_mask_front = zsa->stencil_mask_front;
+ cfg.stencil_mask_back = zsa->stencil_mask_back;
+ cfg.stencil_enable = zsa->base.stencil[0].enabled;
+ cfg.alpha_to_coverage = alpha_to_coverage;
+
+ if (dev->quirks & MIDGARD_SFBD) {
+ cfg.sfbd_write_enable = !blend[0].no_colour;
+ cfg.sfbd_srgb = util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format);
+ cfg.sfbd_dither_disable = !ctx->blend->base.dither;
}
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
- fs->outputs_read || (!zs_enabled && fs->can_discard));
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
+ cfg.unknown_1 = 0x7;
+ cfg.depth_range_1 = cfg.depth_range_2 = rast->offset_tri;
+ cfg.single_sampled_lines = !rast->multisample;
}
- panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
- panfrost_frag_meta_zsa_update(ctx, fragmeta);
- panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
-}
+ if (dev->quirks & MIDGARD_SFBD) {
+ if (blend[0].is_shader) {
+ sfbd_blend.shader = blend[0].shader.gpu |
+ blend[0].shader.first_tag;
+ } else {
+ sfbd_blend.equation = blend[0].equation.equation;
+ sfbd_blend.constant = blend[0].equation.constant;
+ }
+ } else if (!(dev->quirks & IS_BIFROST)) {
+ /* Bug where MRT-capable hw apparently reads the last blend
+ * shader from here instead of the usual location? */
-void
-panfrost_emit_shader_meta(struct panfrost_batch *batch,
- enum pipe_shader_type st,
- struct mali_vertex_tiler_postfix *postfix)
-{
- struct panfrost_context *ctx = batch->ctx;
- struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
+ for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
+ if (!blend[rt].is_shader)
+ continue;
- if (!ss) {
- postfix->shader = 0;
- return;
+ sfbd_blend.shader = blend[rt].shader.gpu |
+ blend[rt].shader.first_tag;
+ break;
+ }
}
- struct mali_shader_meta meta;
+ pan_pack(fragmeta, STATE_OPAQUE, cfg) {
+ cfg.shader = shader;
+ cfg.properties = properties;
+ cfg.depth_units = rast->offset_units * 2.0f;
+ cfg.depth_factor = rast->offset_scale;
+ cfg.multisample_misc = multisample_misc;
+ cfg.stencil_mask_misc = stencil_mask_misc;
- panfrost_shader_meta_init(ctx, st, &meta);
+ cfg.stencil_front = zsa->stencil_front;
+ cfg.stencil_back = zsa->stencil_back;
+
+ /* Bottom bits for stencil ref, exactly one word */
+ bool back_enab = zsa->base.stencil[1].enabled;
+ cfg.stencil_front.opaque[0] |= ctx->stencil_ref.ref_value[0];
+ cfg.stencil_back.opaque[0] |= ctx->stencil_ref.ref_value[back_enab ? 1 : 0];
+
+ if (dev->quirks & IS_BIFROST)
+ cfg.preload = preload;
+ else
+ memcpy(&cfg.sfbd_blend, &sfbd_blend, sizeof(sfbd_blend));
+ }
+}
+
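
The stencil-reference OR above works because the reference field sits in the low bits of the first packed stencil word and is packed as zero in the CSO; a worked sketch under that assumption:

/* Assumption: the ref field occupies the low bits of word 0 and the
 * CSO packs it as 0, so OR-ing inserts the dynamic ref with no mask. */
uint32_t front = zsa->stencil_front.opaque[0]; /* ref bits == 0 */
front |= ctx->stencil_ref.ref_value[0];        /* insert current ref */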
+mali_ptr
+panfrost_emit_compute_shader_meta(struct panfrost_batch *batch, enum pipe_shader_type stage)
+{
+ struct panfrost_shader_state *ss = panfrost_get_shader_state(batch->ctx, stage);
- /* Add the shader BO to the batch. */
panfrost_batch_add_bo(batch, ss->bo,
PAN_BO_ACCESS_PRIVATE |
PAN_BO_ACCESS_READ |
- panfrost_bo_access_for_stage(st));
+ PAN_BO_ACCESS_VERTEX_TILER);
- mali_ptr shader_ptr;
+ panfrost_batch_add_bo(batch, pan_resource(ss->upload.rsrc)->bo,
+ PAN_BO_ACCESS_PRIVATE |
+ PAN_BO_ACCESS_READ |
+ PAN_BO_ACCESS_VERTEX_TILER);
- if (st == PIPE_SHADER_FRAGMENT) {
- struct panfrost_device *dev = pan_device(ctx->base.screen);
- unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
- size_t desc_size = sizeof(meta);
- void *rts = NULL;
- struct panfrost_transfer xfer;
- unsigned rt_size;
+ return pan_resource(ss->upload.rsrc)->bo->gpu + ss->upload.offset;
+}
- if (dev->quirks & MIDGARD_SFBD)
- rt_size = 0;
- else if (dev->quirks & IS_BIFROST)
- rt_size = sizeof(struct bifrost_blend_rt);
- else
- rt_size = sizeof(struct midgard_blend_rt);
+mali_ptr
+panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
- desc_size += rt_size * rt_count;
+ /* Add the shader BO to the batch. */
+ panfrost_batch_add_bo(batch, ss->bo,
+ PAN_BO_ACCESS_PRIVATE |
+ PAN_BO_ACCESS_READ |
+ PAN_BO_ACCESS_FRAGMENT);
- if (rt_size)
- rts = rzalloc_size(ctx, rt_size * rt_count);
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
+ unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
+ struct panfrost_transfer xfer;
+ unsigned rt_size;
- panfrost_frag_shader_meta_init(ctx, &meta, rts);
+ if (dev->quirks & MIDGARD_SFBD)
+ rt_size = 0;
+ else if (dev->quirks & IS_BIFROST)
+ rt_size = sizeof(struct bifrost_blend_rt);
+ else
+ rt_size = sizeof(struct midgard_blend_rt);
- xfer = panfrost_pool_alloc(&batch->pool, desc_size);
+ unsigned desc_size = MALI_STATE_LENGTH + rt_size * rt_count;
+ xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, MALI_STATE_LENGTH);
- memcpy(xfer.cpu, &meta, sizeof(meta));
- memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
+ struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
- if (rt_size)
- ralloc_free(rts);
+ for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
+ blend[c] = panfrost_get_blend_for_context(ctx, c);
- shader_ptr = xfer.gpu;
- } else {
- shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
- sizeof(meta));
- }
+ panfrost_emit_frag_shader(ctx, (struct mali_state_packed *) xfer.cpu, blend);
- postfix->shader = shader_ptr;
+ if (!(dev->quirks & MIDGARD_SFBD))
+ panfrost_emit_blend(batch, xfer.cpu + MALI_STATE_LENGTH, blend);
+ else
+ batch->draws |= PIPE_CLEAR_COLOR0;
+
+ return xfer.gpu;
}
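
For orientation, the single allocation above packs the render state descriptor and one blend record per render target contiguously; e.g. with four Midgard MFBD render targets:

/* desc_size = MALI_STATE_LENGTH + 4 * sizeof(struct midgard_blend_rt)
 *
 *   xfer.cpu + 0                   render state (panfrost_emit_frag_shader)
 *   xfer.cpu + MALI_STATE_LENGTH   blend records (panfrost_emit_blend)
 */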
-void
-panfrost_emit_viewport(struct panfrost_batch *batch,
- struct mali_vertex_tiler_postfix *tiler_postfix)
+mali_ptr
+panfrost_emit_viewport(struct panfrost_batch *batch)
{
struct panfrost_context *ctx = batch->ctx;
const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
unsigned miny = MIN2(fb->height, vp_miny);
unsigned maxy = MIN2(fb->height, vp_maxy);
- if (ss && rast && rast->scissor) {
+ if (ss && rast->scissor) {
minx = MAX2(ss->minx, minx);
miny = MAX2(ss->miny, miny);
maxx = MIN2(ss->maxx, maxx);
cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
}
- tiler_postfix->viewport = T.gpu;
panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
+ return T.gpu;
}
static mali_ptr
* PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
return rsrc->bo->gpu + cb->buffer_offset;
} else if (cb->user_buffer) {
- return panfrost_pool_upload(&batch->pool,
+ return panfrost_pool_upload_aligned(&batch->pool,
cb->user_buffer +
cb->buffer_offset,
- cb->buffer_size);
+ cb->buffer_size, 16);
} else {
unreachable("No constant buffer");
}
size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
size_t size = sys_size + uniform_size;
- struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
- size);
+ struct panfrost_transfer transfer =
+ panfrost_pool_alloc_aligned(&batch->pool, size, 16);
/* Upload sysvals requested by the shader */
panfrost_upload_sysvals(batch, transfer.cpu, ss, stage);
}
/* Next up, attach UBOs. UBO #0 is the uniforms we just
- * uploaded */
+ * uploaded, so it's always included. The count is the highest UBO
+ * addressable -- gaps are included. */
- unsigned ubo_count = panfrost_ubo_count(ctx, stage);
- assert(ubo_count >= 1);
+ unsigned ubo_count = 32 - __builtin_clz(buf->enabled_mask | 1);
size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
- struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
+ struct panfrost_transfer ubos =
+ panfrost_pool_alloc_aligned(&batch->pool, sz,
+ MALI_UNIFORM_BUFFER_LENGTH);
+
uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
/* Upload uniforms as a UBO */
- if (ss->uniform_count) {
+ if (size) {
pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
- cfg.entries = ss->uniform_count;
+ cfg.entries = DIV_ROUND_UP(size, 16);
cfg.pointer = transfer.gpu;
}
} else {
buf->dirty_mask = 0;
}
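
A worked example of the UBO count computed above: with enabled_mask = 0b1001 (UBOs 0 and 3 bound), 32 - __builtin_clz(0b1001 | 1) = 32 - 28 = 4, so four descriptors are emitted and the unbound slots 1-2 are included as gaps:

static unsigned
ubo_count_sketch(uint32_t enabled_mask)
{
        /* UBO #0 (the uniforms uploaded above) always exists, hence | 1 */
        return 32 - __builtin_clz(enabled_mask | 1);
}

/* ubo_count_sketch(0x9) == 4: slots 0..3 emitted, gap at 1-2 included */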
-void
+mali_ptr
panfrost_emit_shared_memory(struct panfrost_batch *batch,
- const struct pipe_grid_info *info,
- struct midgard_payload_vertex_tiler *vtp)
+ const struct pipe_grid_info *info)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
struct panfrost_shader_state *ss = &all->variants[all->active_variant];
unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
128));
- unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
- info->grid[2] * 4;
+
+ unsigned log2_instances =
+ util_logbase2_ceil(info->grid[0]) +
+ util_logbase2_ceil(info->grid[1]) +
+ util_logbase2_ceil(info->grid[2]);
+
+ unsigned shared_size = single_size * (1 << log2_instances) * dev->core_count;
struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
shared_size,
1);
struct mali_shared_memory shared = {
.shared_memory = bo->gpu,
- .shared_workgroup_count =
- util_logbase2_ceil(info->grid[0]) +
- util_logbase2_ceil(info->grid[1]) +
- util_logbase2_ceil(info->grid[2]),
- .shared_unk1 = 0x2,
- .shared_shift = util_logbase2(single_size) - 1
+ .shared_workgroup_count = log2_instances,
+ .shared_shift = util_logbase2(single_size) + 1
};
- vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
- sizeof(shared));
+ return panfrost_pool_upload_aligned(&batch->pool, &shared,
+ sizeof(shared), 64);
}
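
A worked example of the sizing above: for grid = (3, 5, 2),

/* log2_instances = ceil(log2(3)) + ceil(log2(5)) + ceil(log2(2))
 *                = 2 + 3 + 1 = 6
 * shared_size    = single_size * (1 << 6) * dev->core_count
 * shared_shift   = log2(single_size) + 1, matching the descriptor */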
static mali_ptr
}
}
-void
+mali_ptr
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
- enum pipe_shader_type stage,
- struct mali_vertex_tiler_postfix *postfix)
+ enum pipe_shader_type stage)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_device *device = pan_device(ctx->base.screen);
if (!ctx->sampler_view_count[stage])
- return;
+ return 0;
if (device->quirks & IS_BIFROST) {
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
MALI_BIFROST_TEXTURE_LENGTH *
- ctx->sampler_view_count[stage]);
+ ctx->sampler_view_count[stage],
+ MALI_BIFROST_TEXTURE_LENGTH);
struct mali_bifrost_texture_packed *out =
(struct mali_bifrost_texture_packed *) T.cpu;
panfrost_bo_access_for_stage(stage));
}
- postfix->textures = T.gpu;
+ return T.gpu;
} else {
uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
}
- postfix->textures = panfrost_pool_upload(&batch->pool,
- trampolines,
- sizeof(uint64_t) *
- ctx->sampler_view_count[stage]);
+ return panfrost_pool_upload_aligned(&batch->pool, trampolines,
+ sizeof(uint64_t) *
+ ctx->sampler_view_count[stage],
+ sizeof(uint64_t));
}
}
-void
+mali_ptr
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
- enum pipe_shader_type stage,
- struct mali_vertex_tiler_postfix *postfix)
+ enum pipe_shader_type stage)
{
struct panfrost_context *ctx = batch->ctx;
if (!ctx->sampler_count[stage])
- return;
+ return 0;
size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
size_t sz = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool, sz, desc_size);
struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
out[i] = ctx->samplers[stage][i]->hw;
- postfix->sampler_descriptor = T.gpu;
+ return T.gpu;
}
void
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
+ struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
+
+ unsigned instance_shift = vertex_postfix->instance_shift;
+ unsigned instance_odd = vertex_postfix->instance_odd;
+
+ /* Worst case: everything is NPOT, which is only possible if instancing
+ * is enabled. Otherwise a single record is guaranteed */
+ bool could_npot = instance_shift || instance_odd;
+
+ struct panfrost_transfer S = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_BUFFER_LENGTH * vs->attribute_count *
+ (could_npot ? 2 : 1),
+ MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
+
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_LENGTH * vs->attribute_count,
+ MALI_ATTRIBUTE_LENGTH);
+
+ struct mali_attribute_buffer_packed *bufs =
+ (struct mali_attribute_buffer_packed *) S.cpu;
+
+ struct mali_attribute_packed *out =
+ (struct mali_attribute_packed *) T.cpu;
- /* Staged mali_attr, and index into them. i =/= k, depending on the
- * vertex buffer mask and instancing. Twice as much room is allocated,
- * for a worst case of NPOT_DIVIDEs which take up extra slot */
- union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
unsigned k = 0;
for (unsigned i = 0; i < so->num_elements; ++i) {
- /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
+ /* We map buffers 1:1 with the attributes, which
* means duplicating some vertex buffers (who cares? aside from
* maybe some caching implications but I somehow doubt that
* matters) */
if (!rsrc)
continue;
- /* Align to 64 bytes by masking off the lower bits. This
- * will be adjusted back when we fixup the src_offset in
- * mali_attr_meta */
-
- mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
- mali_ptr addr = raw_addr & ~63;
- unsigned chopped_addr = raw_addr - addr;
-
/* Add a dependency of the batch on the vertex buffer */
panfrost_batch_add_bo(batch, rsrc->bo,
PAN_BO_ACCESS_SHARED |
PAN_BO_ACCESS_READ |
PAN_BO_ACCESS_VERTEX_TILER);
- /* Set common fields */
- attrs[k].elements = addr;
- attrs[k].stride = buf->stride;
+ /* Mask off lower bits, see offset fixup below */
+ mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
+ mali_ptr addr = raw_addr & ~63;
/* Since we advanced the base pointer, we shrink the buffer
- * size */
- attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
+ * size, but add the offset we subtracted */
+ unsigned size = rsrc->base.width0 + (raw_addr - addr)
+ - buf->buffer_offset;
- /* We need to add the extra size we masked off (for
- * correctness) so the data doesn't get clamped away */
- attrs[k].size += chopped_addr;
+ /* When there is a divisor, the hardware-level divisor is
+ * the product of the instance divisor and the padded count */
+ unsigned divisor = elem->instance_divisor;
+ unsigned hw_divisor = ctx->padded_count * divisor;
+ unsigned stride = buf->stride;
- /* For non-instancing make sure we initialize */
- attrs[k].shift = attrs[k].extra_flags = 0;
+ /* If there's a divisor(=1) but no instancing, we want every
+ * attribute to be the same */
- /* Instancing uses a dramatically different code path than
- * linear, so dispatch for the actual emission now that the
- * common code is finished */
+ if (divisor && ctx->instance_count == 1)
+ stride = 0;
- unsigned divisor = elem->instance_divisor;
+ if (!divisor || ctx->instance_count <= 1) {
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ if (ctx->instance_count > 1)
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
+
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
+ cfg.divisor_r = instance_shift;
+ cfg.divisor_p = instance_odd;
+ }
+ } else if (util_is_power_of_two_or_zero(hw_divisor)) {
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
+ cfg.divisor_r = __builtin_ctz(hw_divisor);
+ }
- if (divisor && ctx->instance_count == 1) {
- /* Silly corner case where there's a divisor(=1) but
- * there's no legitimate instancing. So we want *every*
- * attribute to be the same. So set stride to zero so
- * we don't go anywhere. */
-
- attrs[k].size = attrs[k].stride + chopped_addr;
- attrs[k].stride = 0;
- attrs[k++].elements |= MALI_ATTR_LINEAR;
- } else if (ctx->instance_count <= 1) {
- /* Normal, non-instanced attributes */
- attrs[k++].elements |= MALI_ATTR_LINEAR;
} else {
- unsigned instance_shift = vertex_postfix->instance_shift;
- unsigned instance_odd = vertex_postfix->instance_odd;
+ unsigned shift = 0, extra_flags = 0;
- k += panfrost_vertex_instanced(ctx->padded_count,
- instance_shift,
- instance_odd,
- divisor, &attrs[k]);
- }
- }
+ unsigned magic_divisor =
+ panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
- /* Add special gl_VertexID/gl_InstanceID buffers */
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
- MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
+ cfg.divisor_r = shift;
+ cfg.divisor_e = extra_flags;
+ }
- struct mali_attribute_packed *out =
- (struct mali_attribute_packed *) T.cpu;
+ pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
+ cfg.divisor_numerator = magic_divisor;
+ cfg.divisor = divisor;
+ }
- panfrost_vertex_id(ctx->padded_count, &attrs[k]);
+ ++k;
+ }
- pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
- cfg.buffer_index = k++;
- cfg.format = so->formats[PAN_VERTEX_ID];
+ ++k;
}
- panfrost_instance_id(ctx->padded_count, &attrs[k]);
+ /* Add special gl_VertexID/gl_InstanceID buffers */
+
+ if (unlikely(vs->attribute_count >= PAN_VERTEX_ID)) {
+ panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+
+ pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_VERTEX_ID];
+ }
+
+ panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
- pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
- cfg.buffer_index = k++;
- cfg.format = so->formats[PAN_INSTANCE_ID];
+ pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_INSTANCE_ID];
+ }
}
/* Attribute addresses require 64-byte alignment, so let:
}
}
-
- vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
- k * sizeof(*attrs));
-
+ vertex_postfix->attributes = S.gpu;
vertex_postfix->attribute_meta = T.gpu;
}
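
The 64-byte masking above pairs with the src_offset fixup referenced in the trailing comment; a sketch of the invariant, using names from this function:

/* raw_addr = rsrc->bo->gpu + buf->buffer_offset;
 * addr     = raw_addr & ~63;     aligned base in the buffer record
 * size    += raw_addr - addr;    chopped bytes remain addressable
 * (the attribute record's offset later adds raw_addr - addr back,
 *  so addr + offset == raw_addr and no data is clamped away) */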
static mali_ptr
-panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
- unsigned stride, unsigned count)
+panfrost_emit_varyings(struct panfrost_batch *batch,
+ struct mali_attribute_buffer_packed *slot,
+ unsigned stride, unsigned count)
{
- /* Fill out the descriptor */
- slot->stride = stride;
- slot->size = stride * count;
- slot->shift = slot->extra_flags = 0;
+ unsigned size = stride * count;
+ mali_ptr ptr = panfrost_pool_alloc_aligned(&batch->invisible_pool, size, 64).gpu;
- struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
- slot->size);
-
- slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
+ pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
+ cfg.stride = stride;
+ cfg.size = size;
+ cfg.pointer = ptr;
+ }
- return transfer.gpu;
+ return ptr;
}
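
Usage sketch for the helper above: carve a GPU-only buffer for count records of stride bytes and point a varying buffer descriptor at it. The invisible pool is used on the assumption (from the pool split in this series) that the CPU never reads varyings back:

mali_ptr buf = panfrost_emit_varyings(batch, &varyings[idx],
                                      stride, vertex_count);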
static unsigned
}
static void
-panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
- unsigned stride, unsigned offset, unsigned count,
+panfrost_emit_streamout(struct panfrost_batch *batch,
+ struct mali_attribute_buffer_packed *slot,
+ unsigned stride_words, unsigned offset, unsigned count,
struct pipe_stream_output_target *target)
{
- /* Fill out the descriptor */
- slot->stride = stride * 4;
- slot->shift = slot->extra_flags = 0;
-
+ unsigned stride = stride_words * 4;
unsigned max_size = target->buffer_size;
- unsigned expected_size = slot->stride * count;
+ unsigned expected_size = stride * count;
/* Grab the BO and bind it to the batch */
struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
PAN_BO_ACCESS_FRAGMENT);
/* We will have an offset applied to get alignment */
- mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
- slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
- slot->size = MIN2(max_size, expected_size) + (addr & 63);
+ mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
+
+ pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
+ cfg.pointer = (addr & ~63);
+ cfg.stride = stride;
+ cfg.size = MIN2(max_size, expected_size) + (addr & 63);
+ }
}
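
A worked example of the alignment trick above: if addr = 0x1007, the descriptor pointer becomes 0x1000 and the size grows by 7, so record 0 still starts at the byte the unaligned address required:

/* cfg.pointer = addr & ~63                              -> 0x1000
 * cfg.size    = MIN2(max_size, expected_size) + (addr & 63)  -> +7 */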
static bool
/* Emitters for varying records */
-static struct mali_attr_meta
-pan_emit_vary(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary(struct mali_attribute_packed *out,
+ unsigned present, enum pan_special_varying buf,
unsigned quirks, enum mali_format format,
unsigned offset)
{
panfrost_get_default_swizzle(nr_channels) :
panfrost_bifrost_swizzle(nr_channels);
- struct mali_attr_meta meta = {
- .index = pan_varying_index(present, buf),
- .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
- .format = (format << 12) | swizzle,
- .src_offset = offset
- };
-
- return meta;
+ pan_pack(out, ATTRIBUTE, cfg) {
+ cfg.buffer_index = pan_varying_index(present, buf);
+ cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
+ cfg.format = (format << 12) | swizzle;
+ cfg.offset = offset;
+ }
}
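
For concreteness, the packed format word combines the hardware format enum with a channel swizzle in the low 12 bits; e.g. a generic vec4 varying on Midgard would be (illustrative values, not taken from the patch):

cfg.format = (MALI_RGBA32F << 12) | panfrost_get_default_swizzle(4);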
/* General varying that is unused */
-static struct mali_attr_meta
-pan_emit_vary_only(unsigned present, unsigned quirks)
+static void
+pan_emit_vary_only(struct mali_attribute_packed *out,
+ unsigned present, unsigned quirks)
{
- return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
+ pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
}
/* Special records */
[PAN_VARY_FRAGCOORD] = MALI_RGBA32F
};
-static struct mali_attr_meta
-pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary_special(struct mali_attribute_packed *out,
+ unsigned present, enum pan_special_varying buf,
unsigned quirks)
{
assert(buf < PAN_VARY_MAX);
- return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
+ pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
}
static enum mali_format
* a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
* value. */
-static struct mali_attr_meta
-pan_emit_vary_xfb(unsigned present,
+static void
+pan_emit_vary_xfb(struct mali_attribute_packed *out,
+ unsigned present,
unsigned max_xfb,
unsigned *streamout_offsets,
unsigned quirks,
panfrost_get_default_swizzle(o.num_components) :
panfrost_bifrost_swizzle(o.num_components);
- /* Otherwise construct a record for it */
- struct mali_attr_meta meta = {
+ pan_pack(out, ATTRIBUTE, cfg) {
/* XFB buffers come after everything else */
- .index = pan_xfb_base(present) + o.output_buffer,
-
- /* As usual unknown bit */
- .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
+ cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
+ cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
/* Override number of channels and precision to highp */
- .format = (pan_xfb_format(format, o.num_components) << 12) | swizzle,
+ cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
/* Apply given offsets together */
- .src_offset = (o.dst_offset * 4) /* dwords */
- + streamout_offsets[o.output_buffer]
- };
-
- return meta;
+ cfg.offset = (o.dst_offset * 4) /* dwords */
+ + streamout_offsets[o.output_buffer];
+ }
}
/* Determine if we should capture a varying for XFB. This requires actually
return o->output_buffer < max_xfb;
}
-/* Higher-level wrapper around all of the above, classifying a varying into one
- * of the above types */
-
-static struct mali_attr_meta
-panfrost_emit_varying(
- struct panfrost_shader_state *stage,
+static void
+pan_emit_general_varying(struct mali_attribute_packed *out,
struct panfrost_shader_state *other,
struct panfrost_shader_state *xfb,
+ gl_varying_slot loc,
+ enum mali_format format,
unsigned present,
- unsigned max_xfb,
- unsigned *streamout_offsets,
unsigned quirks,
unsigned *gen_offsets,
enum mali_format *gen_formats,
unsigned *gen_stride,
unsigned idx,
- bool should_alloc,
- bool is_fragment)
+ bool should_alloc)
{
- gl_varying_slot loc = stage->varyings_loc[idx];
- enum mali_format format = stage->varyings[idx];
-
- /* Override format to match linkage */
- if (!should_alloc && gen_formats[idx])
- format = gen_formats[idx];
-
- if (has_point_coord(stage->point_sprite_mask, loc)) {
- return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
- } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
- struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
- return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
- } else if (loc == VARYING_SLOT_POS) {
- if (is_fragment)
- return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
- else
- return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
- } else if (loc == VARYING_SLOT_PSIZ) {
- return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
- } else if (loc == VARYING_SLOT_PNTC) {
- return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
- } else if (loc == VARYING_SLOT_FACE) {
- return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
- }
-
- /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
+ /* Check if we're linked */
signed other_idx = -1;
for (unsigned j = 0; j < other->varying_count; ++j) {
}
}
- if (other_idx < 0)
- return pan_emit_vary_only(present, quirks);
+ if (other_idx < 0) {
+ pan_emit_vary_only(out, present, quirks);
+ return;
+ }
unsigned offset = gen_offsets[other_idx];
*gen_stride += size;
}
- return pan_emit_vary(present, PAN_VARY_GENERAL,
- quirks, format, offset);
+ pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
+}
+
+/* Higher-level wrapper around all of the above, classifying a varying into one
+ * of the above types */
+
+static void
+panfrost_emit_varying(
+ struct mali_attribute_packed *out,
+ struct panfrost_shader_state *stage,
+ struct panfrost_shader_state *other,
+ struct panfrost_shader_state *xfb,
+ unsigned present,
+ unsigned max_xfb,
+ unsigned *streamout_offsets,
+ unsigned quirks,
+ unsigned *gen_offsets,
+ enum mali_format *gen_formats,
+ unsigned *gen_stride,
+ unsigned idx,
+ bool should_alloc,
+ bool is_fragment)
+{
+ gl_varying_slot loc = stage->varyings_loc[idx];
+ enum mali_format format = stage->varyings[idx];
+
+ /* Override format to match linkage */
+ if (!should_alloc && gen_formats[idx])
+ format = gen_formats[idx];
+
+ if (has_point_coord(stage->point_sprite_mask, loc)) {
+ pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+ } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
+ struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+ pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
+ } else if (loc == VARYING_SLOT_POS) {
+ if (is_fragment)
+ pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
+ else
+ pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
+ } else if (loc == VARYING_SLOT_PSIZ) {
+ pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
+ } else if (loc == VARYING_SLOT_PNTC) {
+ pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+ } else if (loc == VARYING_SLOT_FACE) {
+ pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
+ } else {
+ pan_emit_general_varying(out, other, xfb, loc, format, present,
+ quirks, gen_offsets, gen_formats, gen_stride,
+ idx, should_alloc);
+ }
}
static void
-pan_emit_special_input(union mali_attr *varyings,
+pan_emit_special_input(struct mali_attribute_buffer_packed *out,
unsigned present,
enum pan_special_varying v,
- mali_ptr addr)
+ unsigned special)
{
if (present & (1 << v)) {
- /* Ensure we write exactly once for performance and with fields
- * zeroed appropriately to avoid flakes */
-
- union mali_attr s = {
- .elements = addr
- };
+ unsigned idx = pan_varying_index(present, v);
- varyings[pan_varying_index(present, v)] = s;
+ pan_pack(out + idx, ATTRIBUTE_BUFFER, cfg) {
+ cfg.special = special;
+ cfg.type = 0;
+ }
}
}
vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
- vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
- fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
+ vs_size = MALI_ATTRIBUTE_LENGTH * vs->varying_count;
+ fs_size = MALI_ATTRIBUTE_LENGTH * fs->varying_count;
- struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
- vs_size +
- fs_size);
+ struct panfrost_transfer trans = panfrost_pool_alloc_aligned(
+ &batch->pool, vs_size + fs_size, MALI_ATTRIBUTE_LENGTH);
struct pipe_stream_output_info *so = &vs->stream_output;
unsigned present = pan_varying_present(vs, fs, dev->quirks);
ctx->streamout.targets[i]);
}
- struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
- struct mali_attr_meta *ofs = ovs + vs->varying_count;
+ struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
+ struct mali_attribute_packed *ofs = ovs + vs->varying_count;
for (unsigned i = 0; i < vs->varying_count; i++) {
- ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
+ panfrost_emit_varying(ovs + i, vs, fs, vs, present,
ctx->streamout.num_targets, streamout_offsets,
dev->quirks,
gen_offsets, gen_formats, &gen_stride, i, true, false);
}
for (unsigned i = 0; i < fs->varying_count; i++) {
- ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
+ panfrost_emit_varying(ofs + i, fs, vs, vs, present,
ctx->streamout.num_targets, streamout_offsets,
dev->quirks,
gen_offsets, gen_formats, &gen_stride, i, false, true);
}
unsigned xfb_base = pan_xfb_base(present);
- struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
- sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
- union mali_attr *varyings = (union mali_attr *) T.cpu;
+ struct panfrost_transfer T = panfrost_pool_alloc_aligned(&batch->pool,
+ MALI_ATTRIBUTE_BUFFER_LENGTH * (xfb_base + ctx->streamout.num_targets),
+ MALI_ATTRIBUTE_BUFFER_LENGTH * 2);
+ struct mali_attribute_buffer_packed *varyings =
+ (struct mali_attribute_buffer_packed *) T.cpu;
/* Emit the stream out buffers */
2, vertex_count);
}
- pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
- pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
- pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
+ pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_ATTRIBUTE_SPECIAL_POINT_COORD);
+ pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_ATTRIBUTE_SPECIAL_FRONT_FACING);
+ pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_ATTRIBUTE_SPECIAL_FRAG_COORD);
vertex_postfix->varyings = T.gpu;
tiler_postfix->varyings = T.gpu;
/* If rasterizer discard is enable, only submit the vertex */
- bool rasterizer_discard = ctx->rasterizer &&
- ctx->rasterizer->base.rasterizer_discard;
-
unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
vp, vp_size, false);
- if (rasterizer_discard)
+ if (ctx->rasterizer->base.rasterizer_discard)
return;
panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
0, 0,
};
- return panfrost_pool_upload(&batch->pool, locations, 96 * sizeof(uint16_t));
+ return panfrost_pool_upload_aligned(&batch->pool, locations, 96 * sizeof(uint16_t), 64);
}