*/
#include "util/macros.h"
+#include "util/u_prim.h"
#include "util/u_vbuf.h"
#include "panfrost-quirks.h"
-#include "pan_allocate.h"
+#include "pan_pool.h"
#include "pan_bo.h"
#include "pan_cmdstream.h"
#include "pan_context.h"
#include "pan_job.h"
-/* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
- * framebuffer */
+/* If a BO is accessed for a particular shader stage, will it be in the primary
+ * batch (vertex/tiler) or the secondary batch (fragment)? Anything but
+ * fragment will be primary, e.g. compute jobs will be considered
+ * "vertex/tiler" by analogy */
-void
-panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *vt)
+static inline uint32_t
+panfrost_bo_access_for_stage(enum pipe_shader_type stage)
{
- struct panfrost_screen *screen = pan_screen(ctx->base.screen);
- struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ assert(stage == PIPE_SHADER_FRAGMENT ||
+ stage == PIPE_SHADER_VERTEX ||
+ stage == PIPE_SHADER_COMPUTE);
- /* If we haven't, reserve space for the framebuffer */
-
- if (!batch->framebuffer.gpu) {
- unsigned size = (screen->quirks & MIDGARD_SFBD) ?
- sizeof(struct mali_single_framebuffer) :
- sizeof(struct mali_framebuffer);
+ return stage == PIPE_SHADER_FRAGMENT ?
+ PAN_BO_ACCESS_FRAGMENT :
+ PAN_BO_ACCESS_VERTEX_TILER;
+}
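+
+/* For example (illustrative): panfrost_bo_access_for_stage(PIPE_SHADER_COMPUTE)
+ * returns PAN_BO_ACCESS_VERTEX_TILER, matching the comment above. */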
- batch->framebuffer = panfrost_allocate_transient(batch, size);
+static void
+panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
+ struct mali_vertex_tiler_postfix *postfix)
+{
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
- /* Tag the pointer */
- if (!(screen->quirks & MIDGARD_SFBD))
- batch->framebuffer.gpu |= MALI_MFBD;
- }
+ unsigned shift = panfrost_get_stack_shift(batch->stack_size);
+ struct mali_shared_memory shared = {
+ .stack_shift = shift,
+ .scratchpad = panfrost_batch_get_scratchpad(batch, shift,
+ dev->thread_tls_alloc,
+ dev->core_count)->gpu,
+ .shared_workgroup_count = ~0,
+ };
+ postfix->shared_memory = panfrost_pool_upload(&batch->pool, &shared, sizeof(shared));
+}
- vt->postfix.shared_memory = batch->framebuffer.gpu;
+static void
+panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
+ struct mali_vertex_tiler_postfix *postfix)
+{
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ postfix->shared_memory = panfrost_batch_reserve_framebuffer(batch);
}
-void
+static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_prefix *prefix,
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
- tp->gl_enables |= 0x7;
- SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
+ postfix->gl_enables |= 0x7;
+ SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
rasterizer && rasterizer->base.front_ccw);
- SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
+ SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
- SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
+ SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
- SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
+ SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
rasterizer && rasterizer->base.flatshade_first);
+}
+
+void
+panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
+ struct mali_vertex_tiler_prefix *prefix,
+ union midgard_primitive_size *primitive_size)
+{
+ struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
if (!panfrost_writes_point_size(ctx)) {
- bool points = tp->prefix.draw_mode == MALI_POINTS;
+ bool points = prefix->draw_mode == MALI_DRAW_MODE_POINTS;
float val = 0.0f;
 if (rasterizer)
 val = points ?
 rasterizer->base.point_size :
 rasterizer->base.line_width;
- tp->primitive_size.constant = val;
+ primitive_size->constant = val;
}
}
-void
+static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_postfix *postfix)
{
- SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
- if (ctx->occlusion_query)
- tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
- else
- tp->postfix.occlusion_counter = 0;
+ SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
+ if (ctx->occlusion_query) {
+ postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
+ panfrost_batch_add_bo(ctx->batch, ctx->occlusion_query->bo,
+ PAN_BO_ACCESS_SHARED |
+ PAN_BO_ACCESS_RW |
+ PAN_BO_ACCESS_FRAGMENT);
+ } else {
+ postfix->occlusion_counter = 0;
+ }
+}
+
+void
+panfrost_vt_init(struct panfrost_context *ctx,
+ enum pipe_shader_type stage,
+ struct mali_vertex_tiler_prefix *prefix,
+ struct mali_vertex_tiler_postfix *postfix)
+{
+ struct panfrost_device *device = pan_device(ctx->base.screen);
+
+ if (!ctx->shader[stage])
+ return;
+
+ memset(prefix, 0, sizeof(*prefix));
+ memset(postfix, 0, sizeof(*postfix));
+
+ if (device->quirks & IS_BIFROST) {
+ postfix->gl_enables = 0x2;
+ panfrost_vt_emit_shared_memory(ctx, postfix);
+ } else {
+ postfix->gl_enables = 0x6;
+ panfrost_vt_attach_framebuffer(ctx, postfix);
+ }
+
+ if (stage == PIPE_SHADER_FRAGMENT) {
+ panfrost_vt_update_occlusion_query(ctx, postfix);
+ panfrost_vt_update_rasterizer(ctx, prefix, postfix);
+ }
}
static unsigned
} else {
/* Otherwise, we need to upload to transient memory */
const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
- out = panfrost_upload_transient(batch, ibuf8 + offset,
+ out = panfrost_pool_upload(&batch->pool, ibuf8 + offset,
info->count *
info->index_size);
}
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
const struct pipe_draw_info *info,
enum mali_draw_mode draw_mode,
- struct midgard_payload_vertex_tiler *vp,
- struct midgard_payload_vertex_tiler *tp,
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_prefix *tiler_prefix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
unsigned *vertex_count,
unsigned *padded_count)
{
- tp->prefix.draw_mode = draw_mode;
+ tiler_prefix->draw_mode = draw_mode;
unsigned draw_flags = 0;
if (info->index_size) {
unsigned min_index = 0, max_index = 0;
- tp->prefix.indices = panfrost_get_index_buffer_bounded(ctx,
+ tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
info,
&min_index,
&max_index);
/* Use the corresponding values */
*vertex_count = max_index - min_index + 1;
- tp->offset_start = vp->offset_start = min_index + info->index_bias;
- tp->prefix.offset_bias_correction = -min_index;
- tp->prefix.index_count = MALI_POSITIVE(info->count);
+ tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
+ tiler_prefix->offset_bias_correction = -min_index;
+ tiler_prefix->index_count = MALI_POSITIVE(info->count);
draw_flags |= panfrost_translate_index_size(info->index_size);
} else {
- tp->prefix.indices = 0;
+ tiler_prefix->indices = 0;
*vertex_count = ctx->vertex_count;
- tp->offset_start = vp->offset_start = info->start;
- tp->prefix.offset_bias_correction = 0;
- tp->prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
+ tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
+ tiler_prefix->offset_bias_correction = 0;
+ tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
}
- tp->prefix.unknown_draw = draw_flags;
+ tiler_prefix->unknown_draw = draw_flags;
/* Encode the padded vertex count */
unsigned shift = __builtin_ctz(ctx->padded_count);
unsigned k = ctx->padded_count >> (shift + 1);
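+ /* e.g. (illustrative) padded_count = 24 = 0b11000 gives shift = 3 and
+ * k = 1; indeed (2k + 1) << shift = 3 << 3 = 24, the odd-times-power-of-two
+ * decomposition the hardware expects. */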
- tp->instance_shift = vp->instance_shift = shift;
- tp->instance_odd = vp->instance_odd = k;
+ tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
+ tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
} else {
*padded_count = *vertex_count;
/* Reset instancing state */
- tp->instance_shift = vp->instance_shift = 0;
- tp->instance_odd = vp->instance_odd = 0;
+ tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
+ tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
}
}
enum pipe_shader_type st,
struct mali_shader_meta *meta)
{
+ const struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
memset(meta, 0, sizeof(*meta));
meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
- meta->midgard1.uniform_count = MIN2(ss->uniform_count,
- ss->uniform_cutoff);
- meta->midgard1.work_count = ss->work_reg_count;
meta->attribute_count = ss->attribute_count;
meta->varying_count = ss->varying_count;
- meta->midgard1.flags_hi = 0x8; /* XXX */
- meta->midgard1.flags_lo = 0x220;
meta->texture_count = ctx->sampler_view_count[st];
meta->sampler_count = ctx->sampler_count[st];
- meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
-}
-
-static unsigned
-panfrost_translate_compare_func(enum pipe_compare_func in)
-{
- switch (in) {
- case PIPE_FUNC_NEVER:
- return MALI_FUNC_NEVER;
-
- case PIPE_FUNC_LESS:
- return MALI_FUNC_LESS;
-
- case PIPE_FUNC_EQUAL:
- return MALI_FUNC_EQUAL;
- case PIPE_FUNC_LEQUAL:
- return MALI_FUNC_LEQUAL;
+ if (dev->quirks & IS_BIFROST) {
+ if (st == PIPE_SHADER_VERTEX)
+ meta->bifrost1.unk1 = 0x800000;
+ else {
+ /* First clause ATEST |= 0x4000000.
+ * Less than 32 regs |= 0x200 */
+ meta->bifrost1.unk1 = 0x950020;
+ }
- case PIPE_FUNC_GREATER:
- return MALI_FUNC_GREATER;
+ meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
+ if (st == PIPE_SHADER_VERTEX)
+ meta->bifrost2.preload_regs = 0xC0;
+ else {
+ meta->bifrost2.preload_regs = 0x1;
+ SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
+ }
- case PIPE_FUNC_NOTEQUAL:
- return MALI_FUNC_NOTEQUAL;
+ meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
+ ss->uniform_cutoff);
+ } else {
+ meta->midgard1.uniform_count = MIN2(ss->uniform_count,
+ ss->uniform_cutoff);
+ meta->midgard1.work_count = ss->work_reg_count;
- case PIPE_FUNC_GEQUAL:
- return MALI_FUNC_GEQUAL;
+ /* TODO: This is not conformant on ES3 */
+ meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
- case PIPE_FUNC_ALWAYS:
- return MALI_FUNC_ALWAYS;
+ meta->midgard1.flags_lo = 0x20;
+ meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
- default:
- unreachable("Invalid func");
+ SET_BIT(meta->midgard1.flags_hi, MALI_WRITES_GLOBAL, ss->writes_global);
}
}
static unsigned
-panfrost_translate_stencil_op(enum pipe_stencil_op in)
+translate_tex_wrap(enum pipe_tex_wrap w)
{
- switch (in) {
- case PIPE_STENCIL_OP_KEEP:
- return MALI_STENCIL_KEEP;
-
- case PIPE_STENCIL_OP_ZERO:
- return MALI_STENCIL_ZERO;
-
- case PIPE_STENCIL_OP_REPLACE:
- return MALI_STENCIL_REPLACE;
-
- case PIPE_STENCIL_OP_INCR:
- return MALI_STENCIL_INCR;
-
- case PIPE_STENCIL_OP_DECR:
- return MALI_STENCIL_DECR;
+ switch (w) {
+ case PIPE_TEX_WRAP_REPEAT: return MALI_WRAP_MODE_REPEAT;
+ case PIPE_TEX_WRAP_CLAMP: return MALI_WRAP_MODE_CLAMP;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return MALI_WRAP_MODE_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return MALI_WRAP_MODE_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT: return MALI_WRAP_MODE_MIRRORED_REPEAT;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP: return MALI_WRAP_MODE_MIRRORED_CLAMP;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return MALI_WRAP_MODE_MIRRORED_CLAMP_TO_BORDER;
+ default: unreachable("Invalid wrap");
+ }
+}
- case PIPE_STENCIL_OP_INCR_WRAP:
- return MALI_STENCIL_INCR_WRAP;
+/* The hardware compares in the wrong order, so we have to flip before
+ * encoding. Yes, really. */
- case PIPE_STENCIL_OP_DECR_WRAP:
- return MALI_STENCIL_DECR_WRAP;
+static enum mali_func
+panfrost_sampler_compare_func(const struct pipe_sampler_state *cso)
+{
+ if (!cso->compare_mode)
+ return MALI_FUNC_NEVER;
- case PIPE_STENCIL_OP_INVERT:
- return MALI_STENCIL_INVERT;
+ enum mali_func f = panfrost_translate_compare_func(cso->compare_func);
+ return panfrost_flip_compare_func(f);
+}
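+
+/* Illustrative: assuming panfrost_flip_compare_func swaps the operand
+ * order, PIPE_FUNC_LESS ends up encoded as MALI_FUNC_GREATER and
+ * LEQUAL as GEQUAL. */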
- default:
- unreachable("Invalid stencil op");
+static enum mali_mipmap_mode
+pan_pipe_to_mipmode(enum pipe_tex_mipfilter f)
+{
+ switch (f) {
+ case PIPE_TEX_MIPFILTER_NEAREST: return MALI_MIPMAP_MODE_NEAREST;
+ case PIPE_TEX_MIPFILTER_LINEAR: return MALI_MIPMAP_MODE_TRILINEAR;
+ case PIPE_TEX_MIPFILTER_NONE: return MALI_MIPMAP_MODE_NONE;
+ default: unreachable("Invalid");
}
}
-static unsigned
-translate_tex_wrap(enum pipe_tex_wrap w)
+void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
+ struct mali_midgard_sampler_packed *hw)
{
- switch (w) {
- case PIPE_TEX_WRAP_REPEAT:
- return MALI_WRAP_REPEAT;
+ pan_pack(hw, MIDGARD_SAMPLER, cfg) {
+ cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
+ cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
+ cfg.mipmap_mode = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) ?
+ MALI_MIPMAP_MODE_TRILINEAR : MALI_MIPMAP_MODE_NEAREST;
+ cfg.normalized_coordinates = cso->normalized_coords;
- case PIPE_TEX_WRAP_CLAMP:
- return MALI_WRAP_CLAMP;
+ cfg.lod_bias = FIXED_16(cso->lod_bias, true);
- case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
- return MALI_WRAP_CLAMP_TO_EDGE;
+ cfg.minimum_lod = FIXED_16(cso->min_lod, false);
- case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
- return MALI_WRAP_CLAMP_TO_BORDER;
+ /* If necessary, we disable mipmapping in the sampler descriptor by
+ * clamping the LOD as tight as possible (from 0 to epsilon,
+ * essentially -- remember these are fixed point numbers, so
+ * epsilon=1/256) */
- case PIPE_TEX_WRAP_MIRROR_REPEAT:
- return MALI_WRAP_MIRRORED_REPEAT;
+ cfg.maximum_lod = (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) ?
+ cfg.minimum_lod + 1 :
+ FIXED_16(cso->max_lod, false);
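+
+ /* Illustrative: FIXED_16 has 8 fractional bits (epsilon = 1/256),
+ * so "minimum_lod + 1" clamps the range to a single mip level. */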
- case PIPE_TEX_WRAP_MIRROR_CLAMP:
- return MALI_WRAP_MIRRORED_CLAMP;
+ cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
+ cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
+ cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
- case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
- return MALI_WRAP_MIRRORED_CLAMP_TO_EDGE;
+ cfg.compare_function = panfrost_sampler_compare_func(cso);
+ cfg.seamless_cube_map = cso->seamless_cube_map;
- case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
- return MALI_WRAP_MIRRORED_CLAMP_TO_BORDER;
-
- default:
- unreachable("Invalid wrap");
+ cfg.border_color_r = cso->border_color.f[0];
+ cfg.border_color_g = cso->border_color.f[1];
+ cfg.border_color_b = cso->border_color.f[2];
+ cfg.border_color_a = cso->border_color.f[3];
}
}
-void panfrost_sampler_desc_init(const struct pipe_sampler_state *cso,
- struct mali_sampler_descriptor *hw)
-{
- unsigned func = panfrost_translate_compare_func(cso->compare_func);
- bool min_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST;
- bool mag_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST;
- bool mip_linear = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR;
- unsigned min_filter = min_nearest ? MALI_SAMP_MIN_NEAREST : 0;
- unsigned mag_filter = mag_nearest ? MALI_SAMP_MAG_NEAREST : 0;
- unsigned mip_filter = mip_linear ?
- (MALI_SAMP_MIP_LINEAR_1 | MALI_SAMP_MIP_LINEAR_2) : 0;
- unsigned normalized = cso->normalized_coords ? MALI_SAMP_NORM_COORDS : 0;
-
- *hw = (struct mali_sampler_descriptor) {
- .filter_mode = min_filter | mag_filter | mip_filter |
- normalized,
- .wrap_s = translate_tex_wrap(cso->wrap_s),
- .wrap_t = translate_tex_wrap(cso->wrap_t),
- .wrap_r = translate_tex_wrap(cso->wrap_r),
- .compare_func = panfrost_flip_compare_func(func),
- .border_color = {
- cso->border_color.f[0],
- cso->border_color.f[1],
- cso->border_color.f[2],
- cso->border_color.f[3]
- },
- .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
- .max_lod = FIXED_16(cso->max_lod, false),
- .lod_bias = FIXED_16(cso->lod_bias, true), /* can be negative */
- .seamless_cube_map = cso->seamless_cube_map,
- };
-
- /* If necessary, we disable mipmapping in the sampler descriptor by
- * clamping the LOD as tight as possible (from 0 to epsilon,
- * essentially -- remember these are fixed point numbers, so
- * epsilon=1/256) */
-
- if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
- hw->max_lod = hw->min_lod + 1;
-}
-
-static void
-panfrost_make_stencil_state(const struct pipe_stencil_state *in,
- struct mali_stencil_test *out)
+void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
+ struct mali_bifrost_sampler_packed *hw)
{
- out->ref = 0; /* Gallium gets it from elsewhere */
-
- out->mask = in->valuemask;
- out->func = panfrost_translate_compare_func(in->func);
- out->sfail = panfrost_translate_stencil_op(in->fail_op);
- out->dpfail = panfrost_translate_stencil_op(in->zfail_op);
- out->dppass = panfrost_translate_stencil_op(in->zpass_op);
+ pan_pack(hw, BIFROST_SAMPLER, cfg) {
+ cfg.magnify_linear = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR;
+ cfg.minify_linear = cso->min_img_filter == PIPE_TEX_FILTER_LINEAR;
+ cfg.mipmap_mode = pan_pipe_to_mipmode(cso->min_mip_filter);
+ cfg.normalized_coordinates = cso->normalized_coords;
+
+ cfg.lod_bias = FIXED_16(cso->lod_bias, true);
+ cfg.minimum_lod = FIXED_16(cso->min_lod, false);
+ cfg.maximum_lod = FIXED_16(cso->max_lod, false);
+
+ cfg.wrap_mode_s = translate_tex_wrap(cso->wrap_s);
+ cfg.wrap_mode_t = translate_tex_wrap(cso->wrap_t);
+ cfg.wrap_mode_r = translate_tex_wrap(cso->wrap_r);
+
+ cfg.compare_function = panfrost_sampler_compare_func(cso);
+ cfg.seamless_cube_map = cso->seamless_cube_map;
+ }
}
static void
fragmeta->depth_factor = 0.0f;
SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, false);
SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, false);
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, true);
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, true);
return;
}
- bool msaa = ctx->rasterizer->base.multisample;
+ struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
+
+ bool msaa = rast->multisample;
/* TODO: Sample size */
SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
- fragmeta->depth_units = ctx->rasterizer->base.offset_units * 2.0f;
- fragmeta->depth_factor = ctx->rasterizer->base.offset_scale;
+
+ struct panfrost_shader_state *fs;
+ fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+
+ /* EXT_shader_framebuffer_fetch requires the shader to be run
+ * per-sample when outputs are read. */
+ bool per_sample = ctx->min_samples > 1 || fs->outputs_read;
+ SET_BIT(fragmeta->unknown2_3, MALI_PER_SAMPLE, msaa && per_sample);
+
+ fragmeta->depth_units = rast->offset_units * 2.0f;
+ fragmeta->depth_factor = rast->offset_scale;
 /* XXX: Which bit is which? Does this maybe allow offsetting not-tri? */
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A,
- ctx->rasterizer->base.offset_tri);
- SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B,
- ctx->rasterizer->base.offset_tri);
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_A, rast->offset_tri);
+ SET_BIT(fragmeta->unknown2_4, MALI_DEPTH_RANGE_B, rast->offset_tri);
+
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_NEAR, rast->depth_clip_near);
+ SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_CLIP_FAR, rast->depth_clip_far);
}
static void
panfrost_frag_meta_zsa_update(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta)
{
- const struct pipe_depth_stencil_alpha_state *zsa = ctx->depth_stencil;
+ const struct panfrost_zsa_state *so = ctx->depth_stencil;
int zfunc = PIPE_FUNC_ALWAYS;
- if (!zsa) {
- struct pipe_stencil_state default_stencil = {
- .enabled = 0,
- .func = PIPE_FUNC_ALWAYS,
- .fail_op = MALI_STENCIL_KEEP,
- .zfail_op = MALI_STENCIL_KEEP,
- .zpass_op = MALI_STENCIL_KEEP,
- .writemask = 0xFF,
- .valuemask = 0xFF
- };
-
- panfrost_make_stencil_state(&default_stencil,
- &fragmeta->stencil_front);
- fragmeta->stencil_mask_front = default_stencil.writemask;
- fragmeta->stencil_back = fragmeta->stencil_front;
- fragmeta->stencil_mask_back = default_stencil.writemask;
+ if (!so) {
+ /* If stenciling is disabled, the state is irrelevant */
SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST, false);
SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK, false);
} else {
SET_BIT(fragmeta->unknown2_4, MALI_STENCIL_TEST,
- zsa->stencil[0].enabled);
- panfrost_make_stencil_state(&zsa->stencil[0],
- &fragmeta->stencil_front);
- fragmeta->stencil_mask_front = zsa->stencil[0].writemask;
- fragmeta->stencil_front.ref = ctx->stencil_ref.ref_value[0];
+ so->base.stencil[0].enabled);
+
+ fragmeta->stencil_mask_front = so->stencil_mask_front;
+ fragmeta->stencil_mask_back = so->stencil_mask_back;
+
+ /* Bottom bits for stencil ref, exactly one word */
+ fragmeta->stencil_front.opaque[0] = so->stencil_front.opaque[0] | ctx->stencil_ref.ref_value[0];
/* If back-stencil is not enabled, use the front values */
- if (zsa->stencil[1].enabled) {
- panfrost_make_stencil_state(&zsa->stencil[1],
- &fragmeta->stencil_back);
- fragmeta->stencil_mask_back = zsa->stencil[1].writemask;
- fragmeta->stencil_back.ref = ctx->stencil_ref.ref_value[1];
- } else {
+ if (so->base.stencil[1].enabled)
+ fragmeta->stencil_back.opaque[0] = so->stencil_back.opaque[0] | ctx->stencil_ref.ref_value[1];
+ else
fragmeta->stencil_back = fragmeta->stencil_front;
- fragmeta->stencil_mask_back = fragmeta->stencil_mask_front;
- fragmeta->stencil_back.ref = fragmeta->stencil_front.ref;
- }
- if (zsa->depth.enabled)
- zfunc = zsa->depth.func;
+ if (so->base.depth.enabled)
+ zfunc = so->base.depth.func;
/* Depth state (TODO: Refactor) */
SET_BIT(fragmeta->unknown2_3, MALI_DEPTH_WRITEMASK,
- zsa->depth.writemask);
+ so->base.depth.writemask);
}
fragmeta->unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(zfunc));
}
+static bool
+panfrost_fs_required(
+ struct panfrost_shader_state *fs,
+ struct panfrost_blend_final *blend,
+ unsigned rt_count)
+{
+ /* If we generally have side effects */
+ if (fs->fs_sidefx)
+ return true;
+
+ /* If colour is written we need to execute */
+ for (unsigned i = 0; i < rt_count; ++i) {
+ if (!blend[i].no_colour)
+ return true;
+ }
+
+ /* If depth is written and not implied we need to execute.
+ * TODO: Predicate on Z/S writes being enabled */
+ return (fs->writes_depth || fs->writes_stencil);
+}
+
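+/* Illustrative: a fragment shader with no side effects, no colour output
+ * enabled, and no Z/S writes fails panfrost_fs_required(), so the
+ * MIDGARD_SHADERLESS path below can null out the shader entirely. */
+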
static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- struct midgard_blend_rt *rts)
+ void *rts)
{
- const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+ const struct panfrost_device *dev = pan_device(ctx->base.screen);
+ struct panfrost_shader_state *fs;
+ fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
- (screen->quirks & MIDGARD_SFBD) && ctx->blend &&
+ (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
!ctx->blend->base.dither);
+ SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
+ ctx->blend->base.alpha_to_coverage);
+
/* Get blending setup */
unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
blend[c] = panfrost_get_blend_for_context(ctx, c, &shader_bo,
&shader_offset);
- /* If there is a blend shader, work registers are shared. XXX: opt */
+ /* Disable shader execution if we can */
+ if (dev->quirks & MIDGARD_SHADERLESS
+ && !panfrost_fs_required(fs, blend, rt_count)) {
+ fragmeta->shader = 0;
+ fragmeta->attribute_count = 0;
+ fragmeta->varying_count = 0;
+ fragmeta->texture_count = 0;
+ fragmeta->sampler_count = 0;
+
+ /* This feature is not known to work on Bifrost */
+ fragmeta->midgard1.work_count = 1;
+ fragmeta->midgard1.uniform_count = 0;
+ fragmeta->midgard1.uniform_buffer_count = 0;
+ }
+
+ /* If there is a blend shader, work registers are shared. We impose 8
+ * work registers as a limit for blend shaders. Should be lower XXX */
- for (unsigned c = 0; c < rt_count; ++c) {
- if (blend[c].is_shader)
- fragmeta->midgard1.work_count = 16;
+ if (!(dev->quirks & IS_BIFROST)) {
+ for (unsigned c = 0; c < rt_count; ++c) {
+ if (blend[c].is_shader) {
+ fragmeta->midgard1.work_count =
+ MAX2(fragmeta->midgard1.work_count, 8);
+ }
+ }
}
/* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
break;
}
- if (screen->quirks & MIDGARD_SFBD) {
+ if (dev->quirks & MIDGARD_SFBD) {
/* When only a single render target platform is used, the blend
* information is inside the shader meta itself. We additionally
* need to signal CAN_DISCARD for nontrivial blend modes (so
}
SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
- !blend[0].no_blending);
+ !blend[0].no_blending || fs->can_discard);
+
+ batch->draws |= PIPE_CLEAR_COLOR0;
return;
}
+ if (dev->quirks & IS_BIFROST) {
+ bool no_blend = true;
+
+ for (unsigned i = 0; i < rt_count; ++i)
+ no_blend &= (blend[i].no_blending | blend[i].no_colour);
+
+ SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
+ !fs->can_discard && !fs->writes_depth && no_blend);
+ }
+
/* Additional blend descriptor tacked on for jobs using MFBD */
for (unsigned i = 0; i < rt_count; ++i) {
- rts[i].flags = 0x200;
+ unsigned flags = 0;
+
+ if (ctx->pipe_framebuffer.nr_cbufs > i && !blend[i].no_colour) {
+ flags = 0x200;
+ batch->draws |= (PIPE_CLEAR_COLOR0 << i);
- bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
- (ctx->pipe_framebuffer.cbufs[i]) &&
- util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+ bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
+ (ctx->pipe_framebuffer.cbufs[i]) &&
+ util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
- SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
- SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
- SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
- SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
+ SET_BIT(flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
+ SET_BIT(flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
+ SET_BIT(flags, MALI_BLEND_SRGB, is_srgb);
+ SET_BIT(flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
+ }
- if (blend[i].is_shader) {
- rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
+ if (dev->quirks & IS_BIFROST) {
+ struct bifrost_blend_rt *brts = rts;
+
+ brts[i].flags = flags;
+
+ if (blend[i].is_shader) {
+ /* The blend shader's address needs to share
+ * the top 32 bits with the fragment shader's.
+ * TODO: Ensure that's always the case.
+ */
+ assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
+ (fs->bo->gpu & (0xffffffffull << 32)));
+ brts[i].shader = blend[i].shader.gpu;
+ brts[i].unk2 = 0x0;
+ } else if (ctx->pipe_framebuffer.nr_cbufs > i) {
+ enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
+ const struct util_format_description *format_desc;
+ format_desc = util_format_description(format);
+
+ brts[i].equation = *blend[i].equation.equation;
+
+ /* TODO: this is a bit more complicated */
+ brts[i].constant = blend[i].equation.constant;
+
+ brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
+
+ /* 0x19 disables blending and forces REPLACE
+ * mode (equivalent to rgb_mode = alpha_mode =
+ * 0x122, colour mask = 0xF). 0x1a allows
+ * blending. */
+ brts[i].unk2 = blend[i].no_blending ? 0x19 : 0x1a;
+
+ brts[i].shader_type = fs->blend_types[i];
+ } else {
+ /* Dummy attachment for depth-only */
+ brts[i].unk2 = 0x3;
+ brts[i].shader_type = fs->blend_types[i];
+ }
} else {
- rts[i].blend.equation = *blend[i].equation.equation;
- rts[i].blend.constant = blend[i].equation.constant;
+ struct midgard_blend_rt *mrts = rts;
+ mrts[i].flags = flags;
+
+ if (blend[i].is_shader) {
+ mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
+ } else {
+ mrts[i].blend.equation = *blend[i].equation.equation;
+ mrts[i].blend.constant = blend[i].equation.constant;
+ }
}
}
}
static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- struct midgard_blend_rt *rts)
+ void *rts)
{
- const struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ const struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_state *fs;
fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
- fragmeta->alpha_coverage = ~MALI_ALPHA_COVERAGE(0.000000);
- fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x3010;
+ bool msaa = ctx->rasterizer && ctx->rasterizer->base.multisample;
+ fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
+
+ fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
fragmeta->unknown2_4 = 0x4e0;
 /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
 * bit is needed on these earlier chips (perhaps this is a chicken bit of
 * some kind). More investigation is needed. */
- SET_BIT(fragmeta->unknown2_4, 0x10, screen->quirks & MIDGARD_SFBD);
+ SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
+
+ if (dev->quirks & IS_BIFROST) {
+ /* TODO */
+ } else {
+ /* Depending on whether it's legal in the given shader, we try to
+ * enable early-z testing. TODO: respect e-z force */
+
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
+ !fs->can_discard && !fs->writes_global &&
+ !fs->writes_depth && !fs->writes_stencil &&
+ !ctx->blend->base.alpha_to_coverage);
- /* Depending on whether it's legal to in the given shader, we try to
- * enable early-z testing (or forward-pixel kill?) */
+ /* Add the writes Z/S flags if needed. */
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
+ SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
- !fs->can_discard && !fs->writes_depth);
+ /* Any time texturing is used, derivatives are implicitly calculated,
+ * so we need to enable helper invocations */
- /* Add the writes Z/S flags if needed. */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
- SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
+ fs->helper_invocations);
- /* Any time texturing is used, derivatives are implicitly calculated,
- * so we need to enable helper invocations */
+ /* If discard is enabled, the bit we set to convey this
+ * depends on whether depth/stencil is used for the draw.
+ * Just one of depth OR stencil is enough to trigger this. */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
- fs->helper_invocations);
+ const struct pipe_depth_stencil_alpha_state *zsa = &ctx->depth_stencil->base;
+ bool zs_enabled = fs->writes_depth || fs->writes_stencil;
- /* CAN_DISCARD should be set if the fragment shader possibly contains a
- * 'discard' instruction. It is likely this is related to optimizations
- * related to forward-pixel kill, as per "Mali Performance 3: Is
- * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
+ if (zsa) {
+ zs_enabled |= (zsa->depth.enabled && zsa->depth.func != PIPE_FUNC_ALWAYS);
+ zs_enabled |= zsa->stencil[0].enabled;
+ }
- SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
- SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
+ fs->outputs_read || (!zs_enabled && fs->can_discard));
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
+ }
panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
panfrost_frag_meta_zsa_update(ctx, fragmeta);
void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
enum pipe_shader_type st,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
if (!ss) {
- vtp->postfix.shader = 0;
+ postfix->shader = 0;
return;
}
mali_ptr shader_ptr;
if (st == PIPE_SHADER_FRAGMENT) {
- struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
size_t desc_size = sizeof(meta);
- struct midgard_blend_rt rts[4];
+ void *rts = NULL;
struct panfrost_transfer xfer;
+ unsigned rt_size;
- assert(rt_count <= ARRAY_SIZE(rts));
+ if (dev->quirks & MIDGARD_SFBD)
+ rt_size = 0;
+ else if (dev->quirks & IS_BIFROST)
+ rt_size = sizeof(struct bifrost_blend_rt);
+ else
+ rt_size = sizeof(struct midgard_blend_rt);
- panfrost_frag_shader_meta_init(ctx, &meta, rts);
+ desc_size += rt_size * rt_count;
+
+ if (rt_size)
+ rts = rzalloc_size(ctx, rt_size * rt_count);
- if (!(screen->quirks & MIDGARD_SFBD))
- desc_size += sizeof(*rts) * rt_count;
+ panfrost_frag_shader_meta_init(ctx, &meta, rts);
- xfer = panfrost_allocate_transient(batch, desc_size);
+ xfer = panfrost_pool_alloc(&batch->pool, desc_size);
memcpy(xfer.cpu, &meta, sizeof(meta));
- memcpy(xfer.cpu + sizeof(meta), rts, sizeof(*rts) * rt_count);
+ memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
+
+ if (rt_size)
+ ralloc_free(rts);
shader_ptr = xfer.gpu;
} else {
- shader_ptr = panfrost_upload_transient(batch, &meta,
+ shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
sizeof(meta));
}
- vtp->postfix.shader = shader_ptr;
+ postfix->shader = shader_ptr;
}
-static void
-panfrost_mali_viewport_init(struct panfrost_context *ctx,
- struct mali_viewport *mvp)
+void
+panfrost_emit_viewport(struct panfrost_batch *batch,
+ struct mali_vertex_tiler_postfix *tiler_postfix)
{
+ struct panfrost_context *ctx = batch->ctx;
const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
-
- /* Clip bounds are encoded as floats. The viewport itself is encoded as
- * (somewhat) asymmetric ints. */
-
const struct pipe_scissor_state *ss = &ctx->scissor;
+ const struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
+ const struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
- memset(mvp, 0, sizeof(*mvp));
-
- /* By default, do no viewport clipping, i.e. clip to (-inf, inf) in
- * each direction. Clipping to the viewport in theory should work, but
- * in practice causes issues when we're not explicitly trying to
- * scissor */
-
- *mvp = (struct mali_viewport) {
- .clip_minx = -INFINITY,
- .clip_miny = -INFINITY,
- .clip_maxx = INFINITY,
- .clip_maxy = INFINITY,
- };
-
- /* Always scissor to the viewport by default. */
+ /* Derive min/max from translate/scale. Note that since |x| >= 0 by
+ * definition, -|x| <= |x|, hence translate - |scale| <=
+ * translate + |scale|, so the ordering is correct here. */
float vp_minx = (int) (vp->translate[0] - fabsf(vp->scale[0]));
float vp_maxx = (int) (vp->translate[0] + fabsf(vp->scale[0]));
-
float vp_miny = (int) (vp->translate[1] - fabsf(vp->scale[1]));
float vp_maxy = (int) (vp->translate[1] + fabsf(vp->scale[1]));
-
float minz = (vp->translate[2] - fabsf(vp->scale[2]));
float maxz = (vp->translate[2] + fabsf(vp->scale[2]));
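+
+ /* e.g. (illustrative) a 640x480 viewport has translate = (320, 240)
+ * and scale = (320, 240), giving (vp_minx, vp_miny) = (0, 0) and
+ * (vp_maxx, vp_maxy) = (640, 480). */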
- /* Apply the scissor test */
+ /* Scissor to the intersection of the viewport and the scissor,
+ * clamped to the framebuffer */
- unsigned minx, miny, maxx, maxy;
+ unsigned minx = MIN2(fb->width, vp_minx);
+ unsigned maxx = MIN2(fb->width, vp_maxx);
+ unsigned miny = MIN2(fb->height, vp_miny);
+ unsigned maxy = MIN2(fb->height, vp_maxy);
- if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
- minx = MAX2(ss->minx, vp_minx);
- miny = MAX2(ss->miny, vp_miny);
- maxx = MIN2(ss->maxx, vp_maxx);
- maxy = MIN2(ss->maxy, vp_maxy);
- } else {
- minx = vp_minx;
- miny = vp_miny;
- maxx = vp_maxx;
- maxy = vp_maxy;
+ if (ss && rast && rast->scissor) {
+ minx = MAX2(ss->minx, minx);
+ miny = MAX2(ss->miny, miny);
+ maxx = MIN2(ss->maxx, maxx);
+ maxy = MIN2(ss->maxy, maxy);
}
- /* Hardware needs the min/max to be strictly ordered, so flip if we
- * need to. The viewport transformation in the vertex shader will
- * handle the negatives if we don't */
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, MALI_VIEWPORT_LENGTH);
- if (miny > maxy) {
- unsigned temp = miny;
- miny = maxy;
- maxy = temp;
- }
-
- if (minx > maxx) {
- unsigned temp = minx;
- minx = maxx;
- maxx = temp;
- }
+ pan_pack(T.cpu, VIEWPORT, cfg) {
+ cfg.scissor_minimum_x = minx;
+ cfg.scissor_minimum_y = miny;
+ cfg.scissor_maximum_x = maxx - 1;
+ cfg.scissor_maximum_y = maxy - 1;
- if (minz > maxz) {
- float temp = minz;
- minz = maxz;
- maxz = temp;
+ cfg.minimum_z = rast->depth_clip_near ? minz : -INFINITY;
+ cfg.maximum_z = rast->depth_clip_far ? maxz : INFINITY;
}
- /* Clamp to the framebuffer size as a last check */
-
- minx = MIN2(ctx->pipe_framebuffer.width, minx);
- maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
-
- miny = MIN2(ctx->pipe_framebuffer.height, miny);
- maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
-
- /* Upload */
-
- mvp->viewport0[0] = minx;
- mvp->viewport1[0] = MALI_POSITIVE(maxx);
-
- mvp->viewport0[1] = miny;
- mvp->viewport1[1] = MALI_POSITIVE(maxy);
-
- mvp->clip_minz = minz;
- mvp->clip_maxz = maxz;
-}
-
-void
-panfrost_emit_viewport(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *tp)
-{
- struct panfrost_context *ctx = batch->ctx;
- struct mali_viewport mvp;
-
- panfrost_mali_viewport_init(batch->ctx, &mvp);
-
- /* Update the job, unless we're doing wallpapering (whose lack of
- * scissor we can ignore, since if we "miss" a tile of wallpaper, it'll
- * just... be faster :) */
-
- if (!ctx->wallpaper_batch)
- panfrost_batch_union_scissor(batch, mvp.viewport0[0],
- mvp.viewport0[1],
- mvp.viewport1[0] + 1,
- mvp.viewport1[1] + 1);
-
- tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
- sizeof(mvp));
+ tiler_postfix->viewport = T.gpu;
+ panfrost_batch_union_scissor(batch, minx, miny, maxx, maxy);
}
static mali_ptr
* PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT */
return rsrc->bo->gpu + cb->buffer_offset;
} else if (cb->user_buffer) {
- return panfrost_upload_transient(batch,
+ return panfrost_pool_upload(&batch->pool,
cb->user_buffer +
cb->buffer_offset,
cb->buffer_size);
void
panfrost_emit_const_buf(struct panfrost_batch *batch,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_shader_variants *all = ctx->shader[stage];
size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
size_t size = sys_size + uniform_size;
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
size);
/* Upload sysvals requested by the shader */
memcpy(transfer.cpu + sys_size, cpu, uniform_size);
}
- struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;
-
/* Next up, attach UBOs. UBO #0 is the uniforms we just
* uploaded */
unsigned ubo_count = panfrost_ubo_count(ctx, stage);
assert(ubo_count >= 1);
- size_t sz = sizeof(uint64_t) * ubo_count;
- uint64_t ubos[PAN_MAX_CONST_BUFFERS];
- int uniform_count = ss->uniform_count;
+ size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
+ struct panfrost_transfer ubos = panfrost_pool_alloc(&batch->pool, sz);
+ uint64_t *ubo_ptr = (uint64_t *) ubos.cpu;
/* Upload uniforms as a UBO */
- ubos[0] = MALI_MAKE_UBO(2 + uniform_count, transfer.gpu);
+
+ if (ss->uniform_count) {
+ pan_pack(ubo_ptr, UNIFORM_BUFFER, cfg) {
+ cfg.entries = ss->uniform_count;
+ cfg.pointer = transfer.gpu;
+ }
+ } else {
+ *ubo_ptr = 0;
+ }
/* The rest are honest-to-goodness UBOs */
bool empty = usz == 0;
if (!enabled || empty) {
- /* Stub out disabled UBOs to catch accesses */
- ubos[ubo] = MALI_MAKE_UBO(0, 0xDEAD0000);
+ ubo_ptr[ubo] = 0;
continue;
}
- mali_ptr gpu = panfrost_map_constant_buffer_gpu(batch, stage,
- buf, ubo);
-
- unsigned bytes_per_field = 16;
- unsigned aligned = ALIGN_POT(usz, bytes_per_field);
- ubos[ubo] = MALI_MAKE_UBO(aligned / bytes_per_field, gpu);
+ pan_pack(ubo_ptr + ubo, UNIFORM_BUFFER, cfg) {
+ cfg.entries = DIV_ROUND_UP(usz, 16);
+ cfg.pointer = panfrost_map_constant_buffer_gpu(batch,
+ stage, buf, ubo);
+ }
}
- mali_ptr ubufs = panfrost_upload_transient(batch, ubos, sz);
postfix->uniforms = transfer.gpu;
- postfix->uniform_buffers = ubufs;
+ postfix->uniform_buffers = ubos.gpu;
buf->dirty_mask = 0;
}
.shared_shift = util_logbase2(single_size) - 1
};
- vtp->postfix.shared_memory = panfrost_upload_transient(batch, &shared,
+ vtp->postfix.shared_memory = panfrost_pool_upload(&batch->pool, &shared,
sizeof(shared));
}
return view->bo->gpu;
}
+static void
+panfrost_update_sampler_view(struct panfrost_sampler_view *view,
+ struct pipe_context *pctx)
+{
+ struct panfrost_resource *rsrc = pan_resource(view->base.texture);
+ if (view->texture_bo != rsrc->bo->gpu ||
+ view->modifier != rsrc->modifier) {
+ panfrost_bo_unreference(view->bo);
+ panfrost_create_sampler_view_bo(view, pctx, &rsrc->base);
+ }
+}
+
void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
if (!ctx->sampler_view_count[stage])
return;
- uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ if (device->quirks & IS_BIFROST) {
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ MALI_BIFROST_TEXTURE_LENGTH *
+ ctx->sampler_view_count[stage]);
- for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
- trampolines[i] = panfrost_get_tex_desc(batch, stage,
- ctx->sampler_views[stage][i]);
+ struct mali_bifrost_texture_packed *out =
+ (struct mali_bifrost_texture_packed *) T.cpu;
- vtp->postfix.texture_trampoline = panfrost_upload_transient(batch,
- trampolines,
- sizeof(uint64_t) *
- ctx->sampler_view_count[stage]);
-}
+ for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
+ struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
+ struct pipe_sampler_view *pview = &view->base;
+ struct panfrost_resource *rsrc = pan_resource(pview->texture);
-void
-panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
- enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
-{
- struct panfrost_context *ctx = batch->ctx;
+ panfrost_update_sampler_view(view, &ctx->base);
+ out[i] = view->bifrost_descriptor;
- if (!ctx->sampler_count[stage])
- return;
+ /* Add the BOs to the job so they are retained until the job is done. */
+
+ panfrost_batch_add_bo(batch, rsrc->bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(stage));
+
+ panfrost_batch_add_bo(batch, view->bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(stage));
+ }
- size_t desc_size = sizeof(struct mali_sampler_descriptor);
- size_t transfer_size = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
- transfer_size);
- struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
+ postfix->textures = T.gpu;
+ } else {
+ uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+
+ for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
+ struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
+
+ panfrost_update_sampler_view(view, &ctx->base);
- for (int i = 0; i < ctx->sampler_count[stage]; ++i)
- desc[i] = ctx->samplers[stage][i]->hw;
+ trampolines[i] = panfrost_get_tex_desc(batch, stage, view);
+ }
- vtp->postfix.sampler_descriptor = transfer.gpu;
+ postfix->textures = panfrost_pool_upload(&batch->pool,
+ trampolines,
+ sizeof(uint64_t) *
+ ctx->sampler_view_count[stage]);
+ }
}
void
-panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp)
+panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
+ enum pipe_shader_type stage,
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
- if (!ctx->vertex)
+ if (!ctx->sampler_count[stage])
return;
- struct panfrost_vertex_state *so = ctx->vertex;
+ size_t desc_size = MALI_BIFROST_SAMPLER_LENGTH;
+ assert(MALI_BIFROST_SAMPLER_LENGTH == MALI_MIDGARD_SAMPLER_LENGTH);
- panfrost_vertex_state_upd_attr_offs(ctx, vp);
- vp->postfix.attribute_meta = panfrost_upload_transient(batch, so->hw,
- sizeof(*so->hw) *
- PAN_MAX_ATTRIBUTE);
+ size_t sz = desc_size * ctx->sampler_count[stage];
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool, sz);
+ struct mali_midgard_sampler_packed *out = (struct mali_midgard_sampler_packed *) T.cpu;
+
+ for (unsigned i = 0; i < ctx->sampler_count[stage]; ++i)
+ out[i] = ctx->samplers[stage][i]->hw;
+
+ postfix->sampler_descriptor = T.gpu;
}
void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp)
+ struct mali_vertex_tiler_postfix *vertex_postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
* vertex buffer mask and instancing. Twice as much room is allocated,
 * for a worst case of NPOT_DIVIDEs, which take up an extra slot */
union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
+ unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
unsigned k = 0;
for (unsigned i = 0; i < so->num_elements; ++i) {
struct pipe_vertex_element *elem = &so->pipe[i];
unsigned vbi = elem->vertex_buffer_index;
-
- /* The exception to 1:1 mapping is that we can have multiple
- * entries (NPOT divisors), so we fixup anyways */
-
- so->hw[i].index = k;
+ attrib_to_buffer[i] = k;
if (!(ctx->vb_mask & (1 << vbi)))
continue;
/* Normal, non-instanced attributes */
attrs[k++].elements |= MALI_ATTR_LINEAR;
} else {
- unsigned instance_shift = vp->instance_shift;
- unsigned instance_odd = vp->instance_odd;
+ unsigned instance_shift = vertex_postfix->instance_shift;
+ unsigned instance_odd = vertex_postfix->instance_odd;
k += panfrost_vertex_instanced(ctx->padded_count,
instance_shift,
/* Add special gl_VertexID/gl_InstanceID buffers */
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
+
+ struct mali_attribute_packed *out =
+ (struct mali_attribute_packed *) T.cpu;
+
panfrost_vertex_id(ctx->padded_count, &attrs[k]);
- so->hw[PAN_VERTEX_ID].index = k++;
+
+ pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_VERTEX_ID];
+ }
+
panfrost_instance_id(ctx->padded_count, &attrs[k]);
- so->hw[PAN_INSTANCE_ID].index = k++;
- /* Upload whatever we emitted and go */
+ pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_INSTANCE_ID];
+ }
+
+ /* Attribute addresses require 64-byte alignment, so let:
+ *
+ * base' = base & ~63 = base - (base & 63)
+ * offset' = offset + (base & 63)
+ *
+ * Since base' + offset' = base + offset, these are equivalent
+ * addressing modes, and now base' is 64-byte aligned.
+ */
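+
+ /* Illustrative: buffer_offset = 67 folds (67 & 63) = 3 into
+ * src_offset below; BOs are 4k aligned, so the adjusted base is
+ * 64-byte aligned as required. */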
+
+ unsigned start = vertex_postfix->offset_start;
+
+ for (unsigned i = 0; i < so->num_elements; ++i) {
+ unsigned vbi = so->pipe[i].vertex_buffer_index;
+ struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
+
+ /* Adjust by the masked off bits of the offset. Make sure we
+ * read src_offset from the CPU-side so->pipe (which is not GPU
+ * visible) rather than the GPU-visible descriptor, due to
+ * caching effects */
+
+ unsigned src_offset = so->pipe[i].src_offset;
+
+ /* BOs aligned to 4k so guaranteed aligned to 64 */
+ src_offset += (buf->buffer_offset & 63);
+
+ /* Also, somewhat obscurely, per-instance data needs to be
+ * offset in response to a delayed start in an indexed draw */
+
+ if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
+ src_offset -= buf->stride * start;
+
+ pan_pack(out + i, ATTRIBUTE, cfg) {
+ cfg.buffer_index = attrib_to_buffer[i];
+ cfg.format = so->formats[i];
+ cfg.offset = src_offset;
+ }
+ }
+
- vp->postfix.attributes = panfrost_upload_transient(batch, attrs,
+ vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
k * sizeof(*attrs));
+
+ vertex_postfix->attribute_meta = T.gpu;
+}
+
+static mali_ptr
+panfrost_emit_varyings(struct panfrost_batch *batch, union mali_attr *slot,
+ unsigned stride, unsigned count)
+{
+ /* Fill out the descriptor */
+ slot->stride = stride;
+ slot->size = stride * count;
+ slot->shift = slot->extra_flags = 0;
+
+ struct panfrost_transfer transfer = panfrost_pool_alloc(&batch->pool,
+ slot->size);
+
+ slot->elements = transfer.gpu | MALI_ATTR_LINEAR;
+
+ return transfer.gpu;
+}
+
+static unsigned
+panfrost_streamout_offset(unsigned stride, unsigned offset,
+ struct pipe_stream_output_target *target)
+{
+ return (target->buffer_offset + (offset * stride * 4)) & 63;
+}
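+
+/* Illustrative: buffer_offset = 100 with offset = 0 yields 100 & 63 = 36,
+ * the same residual panfrost_emit_streamout() folds into the slot; it is
+ * re-applied as the capturing record's src_offset. */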
+
+static void
+panfrost_emit_streamout(struct panfrost_batch *batch, union mali_attr *slot,
+ unsigned stride, unsigned offset, unsigned count,
+ struct pipe_stream_output_target *target)
+{
+ /* Fill out the descriptor */
+ slot->stride = stride * 4;
+ slot->shift = slot->extra_flags = 0;
+
+ unsigned max_size = target->buffer_size;
+ unsigned expected_size = slot->stride * count;
+
+ /* Grab the BO and bind it to the batch */
+ struct panfrost_bo *bo = pan_resource(target->buffer)->bo;
+
+ /* Varyings are WRITE from the perspective of the VERTEX but READ from
+ * the perspective of the TILER and FRAGMENT.
+ */
+ panfrost_batch_add_bo(batch, bo,
+ PAN_BO_ACCESS_SHARED |
+ PAN_BO_ACCESS_RW |
+ PAN_BO_ACCESS_VERTEX_TILER |
+ PAN_BO_ACCESS_FRAGMENT);
+
+ /* We will have an offset applied to get alignment */
+ mali_ptr addr = bo->gpu + target->buffer_offset + (offset * slot->stride);
+ slot->elements = (addr & ~63) | MALI_ATTR_LINEAR;
+ slot->size = MIN2(max_size, expected_size) + (addr & 63);
+}
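+
+/* Illustrative: for addr = bo->gpu + 100 (BOs are 4k aligned), the slot
+ * base becomes bo->gpu + 64 and size grows by the residual 36 bytes, so
+ * the capture region still covers writes beginning at addr. */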
+
+static bool
+has_point_coord(unsigned mask, gl_varying_slot loc)
+{
+ if ((loc >= VARYING_SLOT_TEX0) && (loc <= VARYING_SLOT_TEX7))
+ return (mask & (1 << (loc - VARYING_SLOT_TEX0)));
+ else if (loc == VARYING_SLOT_PNTC)
+ return (mask & (1 << 8));
+ else
+ return false;
+}
+
+/* Helpers for manipulating stream out information so we can pack varyings
+ * accordingly. Compute the src_offset for a given captured varying */
+
+static struct pipe_stream_output *
+pan_get_so(struct pipe_stream_output_info *info, gl_varying_slot loc)
+{
+ for (unsigned i = 0; i < info->num_outputs; ++i) {
+ if (info->output[i].register_index == loc)
+ return &info->output[i];
+ }
+
+ unreachable("Varying not captured");
+}
+
+static unsigned
+pan_varying_size(enum mali_format fmt)
+{
+ unsigned type = MALI_EXTRACT_TYPE(fmt);
+ unsigned chan = MALI_EXTRACT_CHANNELS(fmt);
+ unsigned bits = MALI_EXTRACT_BITS(fmt);
+ unsigned bpc = 0;
+
+ if (bits == MALI_CHANNEL_FLOAT) {
+ /* No doubles */
+ bool fp16 = (type == MALI_FORMAT_SINT);
+ assert(fp16 || (type == MALI_FORMAT_UNORM));
+
+ bpc = fp16 ? 2 : 4;
+ } else {
+ assert(type >= MALI_FORMAT_SNORM && type <= MALI_FORMAT_SINT);
+
+ /* See the enums */
+ bits = 1 << bits;
+ assert(bits >= 8);
+ bpc = bits / 8;
+ }
+
+ return bpc * chan;
+}
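+
+/* Illustrative: pan_varying_size(MALI_R16F) = 2 bytes, while
+ * pan_varying_size(MALI_RGBA32F) = 4 channels * 4 bytes = 16. */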
+
+/* Indices for named (non-XFB) varyings that are present. These are packed
+ * tightly so they correspond to a bitfield present (P) indexed by (1 <<
+ * PAN_VARY_*). This has the nice property that you can lookup the buffer index
+ * of a given special field given a shift S by:
+ *
+ * idx = popcount(P & ((1 << S) - 1))
+ *
+ * That is... look at all of the varyings that come earlier and count
+ * them; the count is the new index. Likewise, the total number of special
+ * buffers required is simply popcount(P)
+ */
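+
+/* Worked example (illustrative): present = 0b111 (GENERAL, POSITION,
+ * PSIZ). Then pan_varying_index(present, PAN_VARY_PSIZ) =
+ * popcount(0b011) = 2, and XFB buffers begin at pan_xfb_base(present) =
+ * popcount(0b111) = 3. */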
+
+enum pan_special_varying {
+ PAN_VARY_GENERAL = 0,
+ PAN_VARY_POSITION = 1,
+ PAN_VARY_PSIZ = 2,
+ PAN_VARY_PNTCOORD = 3,
+ PAN_VARY_FACE = 4,
+ PAN_VARY_FRAGCOORD = 5,
+
+ /* Keep last */
+ PAN_VARY_MAX,
+};
+
+/* Given a varying, figure out which index it corresponds to */
+
+static inline unsigned
+pan_varying_index(unsigned present, enum pan_special_varying v)
+{
+ unsigned mask = (1 << v) - 1;
+ return util_bitcount(present & mask);
+}
+
+/* Get the base offset for XFB buffers, which by convention come after
+ * everything else. Wrapper function for semantic reasons; by construction this
+ * is just popcount. */
+
+static inline unsigned
+pan_xfb_base(unsigned present)
+{
+ return util_bitcount(present);
+}
+
+/* Computes the present mask for varyings so we can start emitting varying records */
+
+static inline unsigned
+pan_varying_present(
+ struct panfrost_shader_state *vs,
+ struct panfrost_shader_state *fs,
+ unsigned quirks)
+{
+ /* At the moment we always emit general and position buffers. Not
+ * strictly necessary but usually harmless */
+
+ unsigned present = (1 << PAN_VARY_GENERAL) | (1 << PAN_VARY_POSITION);
+
+ /* Enable special buffers by the shader info */
+
+ if (vs->writes_point_size)
+ present |= (1 << PAN_VARY_PSIZ);
+
+ if (fs->reads_point_coord)
+ present |= (1 << PAN_VARY_PNTCOORD);
+
+ if (fs->reads_face)
+ present |= (1 << PAN_VARY_FACE);
+
+ if (fs->reads_frag_coord && !(quirks & IS_BIFROST))
+ present |= (1 << PAN_VARY_FRAGCOORD);
+
+ /* Also, if we have a point sprite, we need a point coord buffer */
+
+ for (unsigned i = 0; i < fs->varying_count; i++) {
+ gl_varying_slot loc = fs->varyings_loc[i];
+
+ if (has_point_coord(fs->point_sprite_mask, loc))
+ present |= (1 << PAN_VARY_PNTCOORD);
+ }
+
+ return present;
+}
+
+/* Emitters for varying records */
+
+static struct mali_attr_meta
+pan_emit_vary(unsigned present, enum pan_special_varying buf,
+ unsigned quirks, enum mali_format format,
+ unsigned offset)
+{
+ unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
+ unsigned swizzle = quirks & HAS_SWIZZLES ?
+ panfrost_get_default_swizzle(nr_channels) :
+ panfrost_bifrost_swizzle(nr_channels);
+
+ struct mali_attr_meta meta = {
+ .index = pan_varying_index(present, buf),
+ .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
+ .format = (format << 12) | swizzle,
+ .src_offset = offset
+ };
+
+ return meta;
+}
+
+/* General varying that is unused */
+
+static struct mali_attr_meta
+pan_emit_vary_only(unsigned present, unsigned quirks)
+{
+ return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
+}
+
+/* Special records */
+
+static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
+ [PAN_VARY_POSITION] = MALI_VARYING_POS,
+ [PAN_VARY_PSIZ] = MALI_R16F,
+ [PAN_VARY_PNTCOORD] = MALI_R16F,
+ [PAN_VARY_FACE] = MALI_R32I,
+ [PAN_VARY_FRAGCOORD] = MALI_RGBA32F
+};
+
+static struct mali_attr_meta
+pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
+ unsigned quirks)
+{
+ assert(buf < PAN_VARY_MAX);
+ return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
+}
+
+static enum mali_format
+pan_xfb_format(enum mali_format format, unsigned nr)
+{
+ if (MALI_EXTRACT_BITS(format) == MALI_CHANNEL_FLOAT)
+ return MALI_R32F | MALI_NR_CHANNELS(nr);
+ else
+ return MALI_EXTRACT_TYPE(format) | MALI_NR_CHANNELS(nr) | MALI_CHANNEL_32;
+}
+
+/* Transform feedback records. Note struct pipe_stream_output is (if packed as
+ * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
+ * value. */
+
+static struct mali_attr_meta
+pan_emit_vary_xfb(unsigned present,
+ unsigned max_xfb,
+ unsigned *streamout_offsets,
+ unsigned quirks,
+ enum mali_format format,
+ struct pipe_stream_output o)
+{
+ unsigned swizzle = quirks & HAS_SWIZZLES ?
+ panfrost_get_default_swizzle(o.num_components) :
+ panfrost_bifrost_swizzle(o.num_components);
+
+ /* Otherwise construct a record for it */
+ struct mali_attr_meta meta = {
+ /* XFB buffers come after everything else */
+ .index = pan_xfb_base(present) + o.output_buffer,
+
+ /* As usual unknown bit */
+ .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
+
+ /* Override number of channels and precision to highp */
+ .format = (pan_xfb_format(format, o.num_components) << 12) | swizzle,
+
+ /* Apply given offsets together */
+ .src_offset = (o.dst_offset * 4) /* dwords */
+ + streamout_offsets[o.output_buffer]
+ };
+
+ return meta;
+}
+
+/* Determine if we should capture a varying for XFB. This requires actually
+ * having a buffer for it. If we don't capture it, we'll fall back to a general
+ * varying path (linked or unlinked, possibly discarding the write) */
+
+static bool
+panfrost_xfb_captured(struct panfrost_shader_state *xfb,
+ unsigned loc, unsigned max_xfb)
+{
+ if (!(xfb->so_mask & (1ll << loc)))
+ return false;
+
+ struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+ return o->output_buffer < max_xfb;
+}
+
+/* Higher-level wrapper around all of the above, classifying a varying into one
+ * of the above types */
+
+static struct mali_attr_meta
+panfrost_emit_varying(
+ struct panfrost_shader_state *stage,
+ struct panfrost_shader_state *other,
+ struct panfrost_shader_state *xfb,
+ unsigned present,
+ unsigned max_xfb,
+ unsigned *streamout_offsets,
+ unsigned quirks,
+ unsigned *gen_offsets,
+ enum mali_format *gen_formats,
+ unsigned *gen_stride,
+ unsigned idx,
+ bool should_alloc,
+ bool is_fragment)
+{
+ gl_varying_slot loc = stage->varyings_loc[idx];
+ enum mali_format format = stage->varyings[idx];
+
+ /* Override format to match linkage */
+ if (!should_alloc && gen_formats[idx])
+ format = gen_formats[idx];
+
+ if (has_point_coord(stage->point_sprite_mask, loc)) {
+ return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
+ } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
+ struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+ return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
+ } else if (loc == VARYING_SLOT_POS) {
+ if (is_fragment)
+ return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
+ else
+ return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
+ } else if (loc == VARYING_SLOT_PSIZ) {
+ return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
+ } else if (loc == VARYING_SLOT_PNTC) {
+ return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
+ } else if (loc == VARYING_SLOT_FACE) {
+ return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
+ }
+
+ /* We've exhausted special cases, so it's otherwise a general
+ * varying. Check if we're linked */
+ signed other_idx = -1;
+
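+ /* A linear scan over the other stage's varyings is fine, since
+ * varying counts are small (asserted below 32 when the descriptors
+ * are built) */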
+ for (unsigned j = 0; j < other->varying_count; ++j) {
+ if (other->varyings_loc[j] == loc) {
+ other_idx = j;
+ break;
+ }
+ }
+
+ if (other_idx < 0)
+ return pan_emit_vary_only(present, quirks);
+
+ unsigned offset = gen_offsets[other_idx];
+
+ if (should_alloc) {
+ /* We're linked, so allocate space via a watermark allocation */
+ enum mali_format alt = other->varyings[other_idx];
+
+ /* Do interpolation at minimum precision */
+ unsigned size_main = pan_varying_size(format);
+ unsigned size_alt = pan_varying_size(alt);
+ unsigned size = MIN2(size_main, size_alt);
+
+ /* If a varying is marked for XFB but not actually captured, we
+ * should still match the format XFB would otherwise have used,
+ * since dEQP checks for invariance here. It's unclear if this is
+ * required by the spec. */
+
+ if (xfb->so_mask & (1ull << loc)) {
+ struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+ format = pan_xfb_format(format, o->num_components);
+ size = pan_varying_size(format);
+ } else if (size == size_alt) {
+ format = alt;
+ }
+
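+ /* The watermark is *gen_stride itself: append the new varying at
+ * the current offset and advance, so the final value doubles as
+ * the stride of the general varying buffer */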
+ gen_offsets[idx] = *gen_stride;
+ gen_formats[other_idx] = format;
+ offset = *gen_stride;
+ *gen_stride += size;
+ }
+
+ return pan_emit_vary(present, PAN_VARY_GENERAL,
+ quirks, format, offset);
+}
+
+static void
+pan_emit_special_input(union mali_attr *varyings,
+ unsigned present,
+ enum pan_special_varying v,
+ mali_ptr addr)
+{
+ if (present & (1 << v)) {
+ /* Write the record exactly once, both for performance and so the
+ * remaining fields are zeroed rather than left stale */
+
+ union mali_attr s = {
+ .elements = addr
+ };
+
+ varyings[pan_varying_index(present, v)] = s;
+ }
+}
+
+void
+panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
+ unsigned vertex_count,
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
+ union midgard_primitive_size *primitive_size)
+{
+ /* Load the shaders */
+ struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
+ struct panfrost_shader_state *vs, *fs;
+ size_t vs_size, fs_size;
+
+ /* Allocate the varying descriptor */
+
+ vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);
+ fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+ vs_size = sizeof(struct mali_attr_meta) * vs->varying_count;
+ fs_size = sizeof(struct mali_attr_meta) * fs->varying_count;
+
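+ /* The two descriptor arrays share one allocation: vertex records
+ * first, fragment records immediately after at trans.gpu + vs_size */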
+ struct panfrost_transfer trans = panfrost_pool_alloc(&batch->pool,
+ vs_size +
+ fs_size);
+
+ struct pipe_stream_output_info *so = &vs->stream_output;
+ unsigned present = pan_varying_present(vs, fs, dev->quirks);
+
+ /* Check if this varying is linked by us. This is the case for
+ * general-purpose, non-captured varyings. If it is, link it. If it's
+ * not, use the provided stream out information to determine the
+ * offset, since it was already linked for us. */
+
+ unsigned gen_offsets[32];
+ enum mali_format gen_formats[32];
+ memset(gen_offsets, 0, sizeof(gen_offsets));
+ memset(gen_formats, 0, sizeof(gen_formats));
+
+ unsigned gen_stride = 0;
+ assert(vs->varying_count < ARRAY_SIZE(gen_offsets));
+ assert(fs->varying_count < ARRAY_SIZE(gen_offsets));
+
+ unsigned streamout_offsets[32];
+
+ for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
+ streamout_offsets[i] = panfrost_streamout_offset(
+ so->stride[i],
+ ctx->streamout.offsets[i],
+ ctx->streamout.targets[i]);
+ }
+
+ struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
+ struct mali_attr_meta *ofs = ovs + vs->varying_count;
+
+ for (unsigned i = 0; i < vs->varying_count; i++) {
+ ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
+ ctx->streamout.num_targets, streamout_offsets,
+ dev->quirks,
+ gen_offsets, gen_formats, &gen_stride, i, true, false);
+ }
+
+ for (unsigned i = 0; i < fs->varying_count; i++) {
+ ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
+ ctx->streamout.num_targets, streamout_offsets,
+ dev->quirks,
+ gen_offsets, gen_formats, &gen_stride, i, false, true);
+ }
+
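+ /* Buffer records: the special/general buffers come first, with one
+ * record per streamout target appended from xfb_base onwards */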
+ unsigned xfb_base = pan_xfb_base(present);
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ sizeof(union mali_attr) * (xfb_base + ctx->streamout.num_targets));
+ union mali_attr *varyings = (union mali_attr *) T.cpu;
+
+ /* Emit the stream out buffers */
+
+ unsigned out_count = u_stream_outputs_for_vertices(ctx->active_prim,
+ ctx->vertex_count);
+
+ for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
+ panfrost_emit_streamout(batch, &varyings[xfb_base + i],
+ so->stride[i],
+ ctx->streamout.offsets[i],
+ out_count,
+ ctx->streamout.targets[i]);
+ }
+
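+ /* General-purpose varyings use the per-vertex stride accumulated
+ * while linking above */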
+ panfrost_emit_varyings(batch,
+ &varyings[pan_varying_index(present, PAN_VARY_GENERAL)],
+ gen_stride, vertex_count);
+
+ /* fp32 vec4 gl_Position */
+ tiler_postfix->position_varying = panfrost_emit_varyings(batch,
+ &varyings[pan_varying_index(present, PAN_VARY_POSITION)],
+ sizeof(float) * 4, vertex_count);
+
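+ /* fp16 gl_PointSize, hence the 2-byte stride */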
+ if (present & (1 << PAN_VARY_PSIZ)) {
+ primitive_size->pointer = panfrost_emit_varyings(batch,
+ &varyings[pan_varying_index(present, PAN_VARY_PSIZ)],
+ 2, vertex_count);
+ }
+
+ pan_emit_special_input(varyings, present, PAN_VARY_PNTCOORD, MALI_VARYING_POINT_COORD);
+ pan_emit_special_input(varyings, present, PAN_VARY_FACE, MALI_VARYING_FRONT_FACING);
+ pan_emit_special_input(varyings, present, PAN_VARY_FRAGCOORD, MALI_VARYING_FRAG_COORD);
+
+ vertex_postfix->varyings = T.gpu;
+ tiler_postfix->varyings = T.gpu;
+
+ vertex_postfix->varying_meta = trans.gpu;
+ tiler_postfix->varying_meta = trans.gpu + vs_size;
}
void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_prefix *vertex_prefix,
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_prefix *tiler_prefix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
+ union midgard_primitive_size *primitive_size)
{
struct panfrost_context *ctx = batch->ctx;
- bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
+ bool wallpapering = ctx->wallpaper_batch && batch->scoreboard.tiler_dep;
+ struct bifrost_payload_vertex bifrost_vertex = {0,};
+ struct bifrost_payload_tiler bifrost_tiler = {0,};
+ struct midgard_payload_vertex_tiler midgard_vertex = {0,};
+ struct midgard_payload_vertex_tiler midgard_tiler = {0,};
+ void *vp, *tp;
+ size_t vp_size, tp_size;
+
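+ /* Bifrost and Midgard lay their payloads out differently, so stage
+ * the shared prefix/postfix into the appropriate wrapper and record
+ * its size for job emission below */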
+ if (device->quirks & IS_BIFROST) {
+ bifrost_vertex.prefix = *vertex_prefix;
+ bifrost_vertex.postfix = *vertex_postfix;
+ vp = &bifrost_vertex;
+ vp_size = sizeof(bifrost_vertex);
+
+ bifrost_tiler.prefix = *tiler_prefix;
+ bifrost_tiler.tiler.primitive_size = *primitive_size;
+ bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
+ bifrost_tiler.postfix = *tiler_postfix;
+ tp = &bifrost_tiler;
+ tp_size = sizeof(bifrost_tiler);
+ } else {
+ midgard_vertex.prefix = *vertex_prefix;
+ midgard_vertex.postfix = *vertex_postfix;
+ vp = &midgard_vertex;
+ vp_size = sizeof(midgard_vertex);
+
+ midgard_tiler.prefix = *tiler_prefix;
+ midgard_tiler.postfix = *tiler_postfix;
+ midgard_tiler.primitive_size = *primitive_size;
+ tp = &midgard_tiler;
+ tp_size = sizeof(midgard_tiler);
+ }
if (wallpapering) {
/* Inject in reverse order, with "predicted" job indices.
* THIS IS A HACK XXX */
- panfrost_new_job(batch, JOB_TYPE_TILER, false,
- batch->job_index + 2, tp, sizeof(*tp), true);
- panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
- vp, sizeof(*vp), true);
+ panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false,
+ batch->scoreboard.job_index + 2, tp, tp_size, true);
+ panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
+ vp, vp_size, true);
return;
}
bool rasterizer_discard = ctx->rasterizer &&
ctx->rasterizer->base.rasterizer_discard;
- unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
- vp, sizeof(*vp), false);
+ unsigned vertex = panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_VERTEX, false, 0,
+ vp, vp_size, false);
if (rasterizer_discard)
return;
- panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, sizeof(*tp),
+ panfrost_new_job(&batch->pool, &batch->scoreboard, MALI_JOB_TYPE_TILER, false, vertex, tp, tp_size,
false);
}
+
+/* TODO: stop hardcoding this */
+mali_ptr
+panfrost_emit_sample_locations(struct panfrost_batch *batch)
+{
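+ /* Presumably 48 (x, y) pairs with (128, 128) as the pixel centre in
+ * 8.8 fixed point, but the exact layout is not understood, hence the
+ * TODO above */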
+ uint16_t locations[] = {
+ 128, 128,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 128, 128,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ };
+
+ return panfrost_pool_upload(&batch->pool, locations, sizeof(locations));
+}