* SOFTWARE.
*/
+#include "util/macros.h"
+#include "util/u_vbuf.h"
+
+#include "panfrost-quirks.h"
+
#include "pan_allocate.h"
#include "pan_bo.h"
#include "pan_cmdstream.h"
#include "pan_context.h"
#include "pan_job.h"
+/* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
+ * framebuffer */
+
+void
+panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
+ struct midgard_payload_vertex_tiler *vt)
+{
+ struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+
+ /* If we haven't already, reserve space for the framebuffer descriptor */
+
+ if (!batch->framebuffer.gpu) {
+ unsigned size = (screen->quirks & MIDGARD_SFBD) ?
+ sizeof(struct mali_single_framebuffer) :
+ sizeof(struct mali_framebuffer);
+
+ batch->framebuffer = panfrost_allocate_transient(batch, size);
+
+ /* Tag the pointer */
+ if (!(screen->quirks & MIDGARD_SFBD))
+ batch->framebuffer.gpu |= MALI_MFBD;
+ }
+
+ vt->postfix.shared_memory = batch->framebuffer.gpu;
+}
+
+void
+panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
+ struct midgard_payload_vertex_tiler *tp)
+{
+ struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
+
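+ /* XXX: The low 0x7 bits appear to be always set in traces; their
+ * meaning is unknown */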
+ tp->gl_enables |= 0x7;
+ SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
+ rasterizer && rasterizer->base.front_ccw);
+ SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
+ rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
+ SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
+ rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
+ SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
+ rasterizer && rasterizer->base.flatshade_first);
+
+ if (!panfrost_writes_point_size(ctx)) {
+ bool points = tp->prefix.draw_mode == MALI_POINTS;
+ float val = 0.0f;
+
+ if (rasterizer)
+ val = points ?
+ rasterizer->base.point_size :
+ rasterizer->base.line_width;
+
+ tp->primitive_size.constant = val;
+ }
+}
+
+void
+panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
+ struct midgard_payload_vertex_tiler *tp)
+{
+ SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
+ if (ctx->occlusion_query)
+ tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
+ else
+ tp->postfix.occlusion_counter = 0;
+}
+
+static unsigned
+panfrost_translate_index_size(unsigned size)
+{
+ switch (size) {
+ case 1:
+ return MALI_DRAW_INDEXED_UINT8;
+
+ case 2:
+ return MALI_DRAW_INDEXED_UINT16;
+
+ case 4:
+ return MALI_DRAW_INDEXED_UINT32;
+
+ default:
+ unreachable("Invalid index size");
+ }
+}
+
+/* Gets a GPU address for the associated index buffer. Only guaranteed to be
+ * good for the duration of the draw (transient), though it could last longer.
+ * Also gets the bounds on the index buffer for the range accessed by the
+ * draw. We do these operations together because there are natural
+ * optimizations which require them to be together. */
+
+static mali_ptr
+panfrost_get_index_buffer_bounded(struct panfrost_context *ctx,
+ const struct pipe_draw_info *info,
+ unsigned *min_index, unsigned *max_index)
+{
+ struct panfrost_resource *rsrc = pan_resource(info->index.resource);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ off_t offset = info->start * info->index_size;
+ bool needs_indices = true;
+ mali_ptr out = 0;
+
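+ /* Gallium may hand us precomputed index bounds (max_index == ~0
+ * means "unknown"); use them when present so we can skip scanning
+ * the index buffer */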
+ if (info->max_index != ~0u) {
+ *min_index = info->min_index;
+ *max_index = info->max_index;
+ needs_indices = false;
+ }
+
+ if (!info->has_user_indices) {
+ /* Only resources can be directly mapped */
+ panfrost_batch_add_bo(batch, rsrc->bo,
+ PAN_BO_ACCESS_SHARED |
+ PAN_BO_ACCESS_READ |
+ PAN_BO_ACCESS_VERTEX_TILER);
+ out = rsrc->bo->gpu + offset;
+
+ /* Check the cache */
+ needs_indices = !panfrost_minmax_cache_get(rsrc->index_cache,
+ info->start,
+ info->count,
+ min_index,
+ max_index);
+ } else {
+ /* Otherwise, we need to upload to transient memory */
+ const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
+ out = panfrost_upload_transient(batch, ibuf8 + offset,
+ info->count *
+ info->index_size);
+ }
+
+ if (needs_indices) {
+ /* Fallback */
+ u_vbuf_get_minmax_index(&ctx->base, info, min_index, max_index);
+
+ if (!info->has_user_indices)
+ panfrost_minmax_cache_add(rsrc->index_cache,
+ info->start, info->count,
+ *min_index, *max_index);
+ }
+
+ return out;
+}
+
+void
+panfrost_vt_set_draw_info(struct panfrost_context *ctx,
+ const struct pipe_draw_info *info,
+ enum mali_draw_mode draw_mode,
+ struct midgard_payload_vertex_tiler *vp,
+ struct midgard_payload_vertex_tiler *tp,
+ unsigned *vertex_count,
+ unsigned *padded_count)
+{
+ tp->prefix.draw_mode = draw_mode;
+
+ unsigned draw_flags = 0;
+
+ if (panfrost_writes_point_size(ctx))
+ draw_flags |= MALI_DRAW_VARYING_SIZE;
+
+ if (info->primitive_restart)
+ draw_flags |= MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX;
+
+ /* These don't make much sense */
+
+ draw_flags |= 0x3000;
+
+ if (info->index_size) {
+ unsigned min_index = 0, max_index = 0;
+
+ tp->prefix.indices = panfrost_get_index_buffer_bounded(ctx,
+ info,
+ &min_index,
+ &max_index);
+
+ /* Use the corresponding values */
+ *vertex_count = max_index - min_index + 1;
+ tp->offset_start = vp->offset_start = min_index + info->index_bias;
+ tp->prefix.offset_bias_correction = -min_index;
+ tp->prefix.index_count = MALI_POSITIVE(info->count);
+ draw_flags |= panfrost_translate_index_size(info->index_size);
+ } else {
+ tp->prefix.indices = 0;
+ *vertex_count = ctx->vertex_count;
+ tp->offset_start = vp->offset_start = info->start;
+ tp->prefix.offset_bias_correction = 0;
+ tp->prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
+ }
+
+ tp->prefix.unknown_draw = draw_flags;
+
+ /* Encode the padded vertex count */
+
+ if (info->instance_count > 1) {
+ *padded_count = panfrost_padded_vertex_count(*vertex_count);
+
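+ /* The hardware wants the padded count expressed as
+ * (2k + 1) << shift, so split it into its power-of-two shift and
+ * odd factor k */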
+ unsigned shift = __builtin_ctz(*padded_count);
+ unsigned k = *padded_count >> (shift + 1);
+
+ tp->instance_shift = vp->instance_shift = shift;
+ tp->instance_odd = vp->instance_odd = k;
+ } else {
+ *padded_count = *vertex_count;
+
+ /* Reset instancing state */
+ tp->instance_shift = vp->instance_shift = 0;
+ tp->instance_odd = vp->instance_odd = 0;
+ }
+}
+
+static void
+panfrost_shader_meta_init(struct panfrost_context *ctx,
+ enum pipe_shader_type st,
+ struct mali_shader_meta *meta)
+{
+ struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
+
+ memset(meta, 0, sizeof(*meta));
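+ /* Shader pointers are tagged with the first instruction bundle's
+ * tag so the hardware knows how to begin decoding */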
+ meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
+ meta->midgard1.uniform_count = MIN2(ss->uniform_count,
+ ss->uniform_cutoff);
+ meta->midgard1.work_count = ss->work_reg_count;
+ meta->attribute_count = ss->attribute_count;
+ meta->varying_count = ss->varying_count;
+ meta->midgard1.flags_hi = 0x8; /* XXX */
+ meta->midgard1.flags_lo = 0x220;
+ meta->texture_count = ctx->sampler_view_count[st];
+ meta->sampler_count = ctx->sampler_count[st];
+ meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
+}
+
+static unsigned
+panfrost_translate_compare_func(enum pipe_compare_func in)
+{
+ switch (in) {
+ case PIPE_FUNC_NEVER:
+ return MALI_FUNC_NEVER;
+
+ case PIPE_FUNC_LESS:
+ return MALI_FUNC_LESS;
+
+ case PIPE_FUNC_EQUAL:
+ return MALI_FUNC_EQUAL;
+
+ case PIPE_FUNC_LEQUAL:
+ return MALI_FUNC_LEQUAL;
+
+ case PIPE_FUNC_GREATER:
+ return MALI_FUNC_GREATER;
+
+ case PIPE_FUNC_NOTEQUAL:
+ return MALI_FUNC_NOTEQUAL;
+
+ case PIPE_FUNC_GEQUAL:
+ return MALI_FUNC_GEQUAL;
+
+ case PIPE_FUNC_ALWAYS:
+ return MALI_FUNC_ALWAYS;
+
+ default:
+ unreachable("Invalid func");
+ }
+}
+
+static unsigned
+panfrost_translate_stencil_op(enum pipe_stencil_op in)
+{
+ switch (in) {
+ case PIPE_STENCIL_OP_KEEP:
+ return MALI_STENCIL_KEEP;
+
+ case PIPE_STENCIL_OP_ZERO:
+ return MALI_STENCIL_ZERO;
+
+ case PIPE_STENCIL_OP_REPLACE:
+ return MALI_STENCIL_REPLACE;
+
+ case PIPE_STENCIL_OP_INCR:
+ return MALI_STENCIL_INCR;
+
+ case PIPE_STENCIL_OP_DECR:
+ return MALI_STENCIL_DECR;
+
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ return MALI_STENCIL_INCR_WRAP;
+
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ return MALI_STENCIL_DECR_WRAP;
+
+ case PIPE_STENCIL_OP_INVERT:
+ return MALI_STENCIL_INVERT;
+
+ default:
+ unreachable("Invalid stencil op");
+ }
+}
+
+static unsigned
+translate_tex_wrap(enum pipe_tex_wrap w)
+{
+ switch (w) {
+ case PIPE_TEX_WRAP_REPEAT:
+ return MALI_WRAP_REPEAT;
+
+ case PIPE_TEX_WRAP_CLAMP:
+ return MALI_WRAP_CLAMP;
+
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ return MALI_WRAP_CLAMP_TO_EDGE;
+
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ return MALI_WRAP_CLAMP_TO_BORDER;
+
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ return MALI_WRAP_MIRRORED_REPEAT;
+
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ return MALI_WRAP_MIRRORED_CLAMP;
+
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;
+
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;
+
+ default:
+ unreachable("Invalid wrap");
+ }
+}
+
+void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
+ struct mali_sampler_descriptor *hw)
+{
+ unsigned func = panfrost_translate_compare_func(cso->compare_func);
+ bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
+ bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
+ bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
+ unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
+ unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
+ unsigned mip_filter = mip_linear ?
+ (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
+ unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
+
+ *hw = (struct mali_sampler_descriptor) {
+ .filter_mode = min_filter | mag_filter | mip_filter |
+ normalized,
+ .wrap_s = translate_tex_wrap(cso->wrap_s),
+ .wrap_t = translate_tex_wrap(cso->wrap_t),
+ .wrap_r = translate_tex_wrap(cso->wrap_r),
+ .compare_func = panfrost_flip_compare_func(func),
+ .border_color = {
+ cso->border_color.f[0],
+ cso->border_color.f[1],
+ cso->border_color.f[2],
+ cso->border_color.f[3]
+ },
+ .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
+ .max_lod = FIXED_16(cso->max_lod, false),
+ .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
+ .seamless_cube_map = cso->seamless_cube_map,
+ };
+
+ /* If necessary, we disable mipmapping in the sampler descriptor by
+ * clamping the LOD as tight as possible (from 0 to epsilon,
+ * essentially -- remember these are fixed point numbers, so
+ * epsilon=1/256) */
+
+ if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
+ hw->max_lod = hw->min_lod + 1;
+}
+
+static void
+panfrost_make_stencil_state(const struct pipe_stencil_state *in,
+ struct mali_stencil_test *out)
+{
+ out->ref = 0; /* Gallium gets it from elsewhere */
+
+ out->mask = in->valuemask;
+ out->func = panfrost_translate_compare_func(in->func);
+ out->sfail = panfrost_translate_stencil_op(in->fail_op);
+ out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
+ out->dppass = panfrost_translate_stencil_op(in->zpass_op);
+}
+
+static void
+panfrost_frag_meta_rasterizer_update(struct panfrost_context *ctx,
+ struct mali_shader_meta *fragmeta)
+{
+ if (!ctx->rasterizer) {
+ SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, true);
+ SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, false);
+ fragmeta->depth_units = 0.0f;
+ fragmeta->depth_factor = 0.0f;
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
+ return;
+ }
+
+ bool msaa = ctx->rasterizer->base.multisample;
+
+ /* TODO: Sample size */
+ SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
+ SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
+ fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
+ fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;
+
+ /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
+
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
+ ctx->rasterizer->base.offset_tri);
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
+ ctx->rasterizer->base.offset_tri);
+}
+
+static void
+panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
+ struct mali_shader_meta *fragmeta)
+{
+ const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
+ int zfunc = PIPE_FUNC_ALWAYS;
+
+ if (!zsa) {
+ struct pipe_stencil_state default_stencil = {
+ .enabled = 0,
+ .func = PIPE_FUNC_ALWAYS,
+ .fail_op = PIPE_STENCIL_OP_KEEP,
+ .zfail_op = PIPE_STENCIL_OP_KEEP,
+ .zpass_op = PIPE_STENCIL_OP_KEEP,
+ .writemask = 0xFF,
+ .valuemask = 0xFF
+ };
+
+ panfrost_make_stencil_state(&default_stencil,
+ &fragmeta->stencil_front);
+ fragmeta->stencil_mask_front = default_stencil.writemask;
+ fragmeta->stencil_back = fragmeta->stencil_front;
+ fragmeta->stencil_mask_back = default_stencil.writemask;
+ SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
+ } else {
+ SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
+ zsa->stencil[0].enabled);
+ panfrost_make_stencil_state(&zsa->stencil[0],
+ &fragmeta->stencil_front);
+ fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
+ fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
+
+ /* If back-stencil is not enabled, use the front values */
+
+ if (zsa->stencil[1].enabled) {
+ panfrost_make_stencil_state(&zsa->stencil[1],
+ &fragmeta->stencil_back);
+ fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
+ fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
+ } else {
+ fragmeta->stencil_back = fragmeta->stencil_front;
+ fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
+ fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
+ }
+
+ if (zsa->depth.enabled)
+ zfunc = zsa->depth.func;
+
+ /* Depth state (TODO: Refactor) */
+
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
+ zsa->depth.writemask);
+ }
+
+ fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
+ fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
+}
+
+static void
+panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
+ struct mali_shader_meta *fragmeta,
+ struct midgard_blend_rt *rts)
+{
+ const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+
+ SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
+ (screen->quirks & MIDGARD_SFBD) && ctx->blend &&
+ !ctx->blend->base.dither);
+
+ /* Get blending setup */
+ unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
+
+ struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
+ unsigned shader_offset = 0;
+ struct panfrost_bo *shader_bo = NULL;
+
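+ /* Resolve the blend state per render target; any blend shaders get
+ * packed into a common BO, tracked by the shader_bo/shader_offset
+ * cursor */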
+ for (unsigned c = 0; c < rt_count; ++c)
+ blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
+ &shader_offset);
+
+ /* If there is a blend shader, work registers are shared. XXX: opt */
+
+ for (unsigned c = 0; c < rt_count; ++c) {
+ if (blend[c].is_shader)
+ fragmeta->midgard1.work_count = 16;
+ }
+
+ /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
+ * copied to the blend descriptor appended by convention, but this is
+ * the field actually read by the hardware (or maybe both are read?).
+ * Point it at the last RT with a blend shader. */
+
+ fragmeta->blend.shader = 0;
+
+ for (signed rt = (rt_count - 1); rt >= 0; --rt) {
+ if (!blend[rt].is_shader)
+ continue;
+
+ fragmeta->blend.shader = blend[rt].shader.gpu |
+ blend[rt].shader.first_tag;
+ break;
+ }
+
+ if (screen->quirks & MIDGARD_SFBD) {
+ /* On single render target (SFBD) platforms, the blend information
+ * lives inside the shader meta itself. We additionally need to
+ * signal CAN_DISCARD for nontrivial blend modes (so we're able to
+ * read back the destination buffer) */
+
+ SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
+ blend[0].is_shader);
+
+ if (!blend[0].is_shader) {
+ fragmeta->blend.equation = *blend[0].equation.equation;
+ fragmeta->blend.constant = blend[0].equation.constant;
+ }
+
+ SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
+ !blend[0].no_blending);
+ return;
+ }
+
+ /* Additional blend descriptor tacked on for jobs using MFBD */
+
+ for (unsigned i = 0; i < rt_count; ++i) {
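+ /* XXX: 0x200 is always set in traces; its meaning is unknown */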
+ rts[i].flags = 0x200;
+
+ bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
+ (ctx->pipe_framebuffer.cbufs[i]) &&
+ util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+
+ SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
+ SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
+ SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
+ SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER,
+ ctx->blend && !ctx->blend->base.dither);
+
+ if (blend[i].is_shader) {
+ rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
+ } else {
+ rts[i].blend.equation = *blend[i].equation.equation;
+ rts[i].blend.constant = blend[i].equation.constant;
+ }
+ }
+}
+
+static void
+panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
+ struct mali_shader_meta *fragmeta,
+ struct midgard_blend_rt *rts)
+{
+ const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_shader_state *fs;
+
+ fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+
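+ /* XXX: ~MALI_ALPHA_COVERAGE(0.0) is what traces show when
+ * alpha-to-coverage is off; presumably this disables it */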
+ fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
+ fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
+ fragmeta->unknown2_4 = 0x4e0;
+
+ /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
+ * is required (independent of 32-bit/64-bit descriptors), or why it's
+ * not used on later GPU revisions. Otherwise, all shader jobs fault on
+ * these earlier chips (perhaps this is a chicken bit of some kind).
+ * More investigation is needed. */
+
+ SET_BIT(fragmeta->unknown2_4, 0x10, screen->quirks & MIDGARD_SFBD);
+
+ /* Depending on whether it's legal to do so in the given shader, we try
+ * to enable early-z testing (or forward-pixel kill?) */
+
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
+ !fs->can_discard && !fs->writes_depth);
+
+ /* Add the writes Z/S flags if needed. */
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
+ SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
+
+ /* Any time texturing is used, derivatives are implicitly calculated,
+ * so we need to enable helper invocations */
+
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
+ fs->helper_invocations);
+
+ /* CAN_DISCARD should be set if the fragment shader possibly contains a
+ * 'discard' instruction. It is likely this is related to optimizations
+ * related to forward-pixel kill, as per "Mali Performance 3: Is
+ * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
+
+ SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
+ SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
+
+ panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
+ panfrost_frag_meta_zsa_update(ctx, fragmeta);
+ panfrost_frag_meta_blend_update(ctx, fragmeta, rts);
+}
+
+void
+panfrost_emit_shader_meta(struct panfrost_batch *batch,
+ enum pipe_shader_type st,
+ struct midgard_payload_vertex_tiler *vtp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
+
+ if (!ss) {
+ vtp->postfix.shader = 0;
+ return;
+ }
+
+ struct mali_shader_meta meta;
+
+ panfrost_shader_meta_init(ctx, st, &meta);
+
+ /* Add the shader BO to the batch. */
+ panfrost_batch_add_bo(batch, ss->bo,
+ PAN_BO_ACCESS_PRIVATE |
+ PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(st));
+
+ mali_ptr shader_ptr;
+
+ if (st == PIPE_SHADER_FRAGMENT) {
+ struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
+ size_t desc_size = sizeof(meta);
+ struct midgard_blend_rt rts[4];
+ struct panfrost_transfer xfer;
+
+ assert(rt_count <= ARRAY_SIZE(rts));
+
+ panfrost_frag_shader_meta_init(ctx, &meta, rts);
+
+ if (!(screen->quirks & MIDGARD_SFBD))
+ desc_size += sizeof(*rts) * rt_count;
+
+ xfer = panfrost_allocate_transient(batch, desc_size);
+
+ memcpy(xfer.cpu, &meta, sizeof(meta));
+ /* The RT blend descriptors are only appended on MFBD */
+ if (!(screen->quirks & MIDGARD_SFBD))
+ memcpy(xfer.cpu + sizeof(meta), rts, sizeof(*rts) * rt_count);
+
+ shader_ptr = xfer.gpu;
+ } else {
+ shader_ptr = panfrost_upload_transient(batch, &meta,
+ sizeof(meta));
+ }
+
+ vtp->postfix.shader = shader_ptr;
+}
+
static void
panfrost_mali_viewport_init(struct panfrost_context *ctx,
struct mali_viewport *mvp)
buf->dirty_mask = 0;
}
+
+void
+panfrost_emit_shared_memory(struct panfrost_batch *batch,
+ const struct pipe_grid_info *info,
+ struct midgard_payload_vertex_tiler *vtp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_shader_variants *all = ctx->shader[PIPE_SHADER_COMPUTE];
+ struct panfrost_shader_state *ss = &all->variants[all->active_variant];
+ unsigned single_size = util_next_power_of_two(MAX2(ss->shared_size,
+ 128));
+ unsigned shared_size = single_size * info->grid[0] * info->grid[1] *
+ info->grid[2] * 4;
+ struct panfrost_bo *bo = panfrost_batch_get_shared_memory(batch,
+ shared_size,
+ 1);
+
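+ /* XXX: The 4x factor on shared_size and the shared_shift encoding
+ * below are inferred from traces rather than documented */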
+ struct mali_shared_memory shared = {
+ .shared_memory = bo->gpu,
+ .shared_workgroup_count =
+ util_logbase2_ceil(info->grid[0]) +
+ util_logbase2_ceil(info->grid[1]) +
+ util_logbase2_ceil(info->grid[2]),
+ .shared_unk1 = 0x2,
+ .shared_shift = util_logbase2(single_size) - 1
+ };
+
+ vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
+ sizeof(shared));
+}
+
+static mali_ptr
+panfrost_get_tex_desc(struct panfrost_batch *batch,
+ enum pipe_shader_type st,
+ struct panfrost_sampler_view *view)
+{
+ if (!view)
+ return (mali_ptr) 0;
+
+ struct pipe_sampler_view *pview = &view->base;
+ struct panfrost_resource *rsrc = pan_resource(pview->texture);
+
+ /* Add the BO to the job so it's retained until the job is done. */
+
+ panfrost_batch_add_bo(batch, rsrc->bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(st));
+
+ panfrost_batch_add_bo(batch, view->bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(st));
+
+ return view->bo->gpu;
+}
+
+void
+panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
+ enum pipe_shader_type stage,
+ struct midgard_payload_vertex_tiler *vtp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+
+ if (!ctx->sampler_view_count[stage])
+ return;
+
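+ /* Gather one GPU pointer per bound texture and upload the array;
+ * the hardware follows these "trampolines" to the actual texture
+ * descriptors */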
+ uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+
+ for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
+ trampolines[i] = panfrost_get_tex_desc(batch, stage,
+ ctx->sampler_views[stage][i]);
+
+ vtp->postfix.texture_trampoline = panfrost_upload_transient(batch,
+ trampolines,
+ sizeof(uint64_t) *
+ ctx->sampler_view_count[stage]);
+}
+
+void
+panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
+ enum pipe_shader_type stage,
+ struct midgard_payload_vertex_tiler *vtp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+
+ if (!ctx->sampler_count[stage])
+ return;
+
+ size_t desc_size = sizeof(struct mali_sampler_descriptor);
+ size_t transfer_size = desc_size * ctx->sampler_count[stage];
+ struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ transfer_size);
+ struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
+
+ for (int i = 0; i < ctx->sampler_count[stage]; ++i)
+ desc[i] = ctx->samplers[stage][i]->hw;
+
+ vtp->postfix.sampler_descriptor = transfer.gpu;
+}
+
+void
+panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
+ struct midgard_payload_vertex_tiler *vp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+
+ if (!ctx->vertex)
+ return;
+
+ struct panfrost_vertex_state *so = ctx->vertex;
+
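+ /* Fix up the attribute offsets for this draw's start vertex, then
+ * upload all the attribute metadata in one shot */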
+ panfrost_vertex_state_upd_attr_offs(ctx, vp);
+ vp->postfix.attribute_meta = panfrost_upload_transient(batch, so->hw,
+ sizeof(*so->hw) *
+ PAN_MAX_ATTRIBUTE);
+}
+
+void
+panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
+ struct midgard_payload_vertex_tiler *vp,
+ struct midgard_payload_vertex_tiler *tp)
+{
+ struct panfrost_context *ctx = batch->ctx;
+ bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
+
+ if (wallpapering) {
+ /* Inject in reverse order, with "predicted" job indices.
+ * THIS IS A HACK XXX */
+ panfrost_new_job(batch, JOB_TYPE_TILER, false,
+ batch->job_index + 2, tp, sizeof(*tp), true);
+ panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
+ vp, sizeof(*vp), true);
+ return;
+ }
+
+ /* If rasterizer discard is enabled, only submit the vertex job */
+
+ bool rasterizer_discard = ctx->rasterizer &&
+ ctx->rasterizer->base.rasterizer_discard;
+
+ unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
+ vp, sizeof(*vp), false);
+
+ if (rasterizer_discard)
+ return;
+
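+ /* The tiler job consumes the vertex job's output, so make it depend
+ * on the vertex job's index */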
+ panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, sizeof(*tp),
+ false);
+}