PAN_BO_ACCESS_VERTEX_TILER;
}
-/* TODO: Bifrost requires just a mali_shared_memory, without the rest of the
- * framebuffer */
+static void
+panfrost_vt_emit_shared_memory(struct panfrost_context *ctx,
+ struct mali_vertex_tiler_postfix *postfix)
+{
+ struct panfrost_device *dev = pan_device(ctx->base.screen);
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
-void
+ unsigned shift = panfrost_get_stack_shift(batch->stack_size);
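+
+ /* Bifrost only needs a mali_shared_memory descriptor here (stack /
+ * scratchpad sizing), not a full framebuffer descriptor. */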
+ struct mali_shared_memory shared = {
+ .stack_shift = shift,
+ .scratchpad = panfrost_batch_get_scratchpad(batch, shift, dev->thread_tls_alloc, dev->core_count)->gpu,
+ .shared_workgroup_count = ~0,
+ };
+ postfix->shared_memory = panfrost_upload_transient(batch, &shared, sizeof(shared));
+}
+
+static void
panfrost_vt_attach_framebuffer(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *vt)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
batch->framebuffer.gpu |= MALI_MFBD;
}
- vt->postfix.shared_memory = batch->framebuffer.gpu;
+ postfix->shared_memory = batch->framebuffer.gpu;
}
-void
+static void
panfrost_vt_update_rasterizer(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_prefix *prefix,
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
- tp->gl_enables |= 0x7;
- SET_BIT(tp->gl_enables, MALI_FRONT_CCW_TOP,
+ postfix->gl_enables |= 0x7;
+ SET_BIT(postfix->gl_enables, MALI_FRONT_CCW_TOP,
rasterizer && rasterizer->base.front_ccw);
- SET_BIT(tp->gl_enables, MALI_CULL_FACE_FRONT,
+ SET_BIT(postfix->gl_enables, MALI_CULL_FACE_FRONT,
rasterizer && (rasterizer->base.cull_face & PIPE_FACE_FRONT));
- SET_BIT(tp->gl_enables, MALI_CULL_FACE_BACK,
+ SET_BIT(postfix->gl_enables, MALI_CULL_FACE_BACK,
rasterizer && (rasterizer->base.cull_face & PIPE_FACE_BACK));
- SET_BIT(tp->prefix.unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
+ SET_BIT(prefix->unknown_draw, MALI_DRAW_FLATSHADE_FIRST,
rasterizer && rasterizer->base.flatshade_first);
+}
+
+void
+panfrost_vt_update_primitive_size(struct panfrost_context *ctx,
+ struct mali_vertex_tiler_prefix *prefix,
+ union midgard_primitive_size *primitive_size)
+{
+ struct panfrost_rasterizer *rasterizer = ctx->rasterizer;
if (!panfrost_writes_point_size(ctx)) {
- bool points = tp->prefix.draw_mode == MALI_POINTS;
+ bool points = prefix->draw_mode == MALI_POINTS;
float val = 0.0f;
        if (rasterizer)
                val = points ?
                      rasterizer->base.point_size :
                      rasterizer->base.line_width;
- tp->primitive_size.constant = val;
+ primitive_size->constant = val;
}
}
-void
+static void
panfrost_vt_update_occlusion_query(struct panfrost_context *ctx,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_postfix *postfix)
{
- SET_BIT(tp->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
+ SET_BIT(postfix->gl_enables, MALI_OCCLUSION_QUERY, ctx->occlusion_query);
if (ctx->occlusion_query)
- tp->postfix.occlusion_counter = ctx->occlusion_query->bo->gpu;
+ postfix->occlusion_counter = ctx->occlusion_query->bo->gpu;
else
- tp->postfix.occlusion_counter = 0;
+ postfix->occlusion_counter = 0;
}
void
panfrost_vt_init(struct panfrost_context *ctx,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_prefix *prefix,
+ struct mali_vertex_tiler_postfix *postfix)
{
+ struct panfrost_device *device = pan_device(ctx->base.screen);
+
if (!ctx->shader[stage])
return;
- memset(vtp, 0, sizeof(*vtp));
- vtp->gl_enables = 0x6;
- panfrost_vt_attach_framebuffer(ctx, vtp);
+ memset(prefix, 0, sizeof(*prefix));
+ memset(postfix, 0, sizeof(*postfix));
+
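+ /* Bifrost gets a plain shared-memory descriptor; Midgard instead points
+ * shared_memory at the batch's framebuffer descriptor. */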
+ if (device->quirks & IS_BIFROST) {
+ postfix->gl_enables = 0x2;
+ panfrost_vt_emit_shared_memory(ctx, postfix);
+ } else {
+ postfix->gl_enables = 0x6;
+ panfrost_vt_attach_framebuffer(ctx, postfix);
+ }
if (stage == PIPE_SHADER_FRAGMENT) {
- panfrost_vt_update_occlusion_query(ctx, vtp);
- panfrost_vt_update_rasterizer(ctx, vtp);
+ panfrost_vt_update_occlusion_query(ctx, postfix);
+ panfrost_vt_update_rasterizer(ctx, prefix, postfix);
}
}
-
static unsigned
panfrost_translate_index_size(unsigned size)
{
panfrost_vt_set_draw_info(struct panfrost_context *ctx,
const struct pipe_draw_info *info,
enum mali_draw_mode draw_mode,
- struct midgard_payload_vertex_tiler *vp,
- struct midgard_payload_vertex_tiler *tp,
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_prefix *tiler_prefix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
unsigned *vertex_count,
unsigned *padded_count)
{
- tp->prefix.draw_mode = draw_mode;
+ tiler_prefix->draw_mode = draw_mode;
unsigned draw_flags = 0;
if (info->index_size) {
unsigned min_index = 0, max_index = 0;
- tp->prefix.indices = panfrost_get_index_buffer_bounded(ctx,
+ tiler_prefix->indices = panfrost_get_index_buffer_bounded(ctx,
info,
&min_index,
&max_index);
/* Use the corresponding values */
*vertex_count = max_index - min_index + 1;
- tp->offset_start = vp->offset_start = min_index + info->index_bias;
- tp->prefix.offset_bias_correction = -min_index;
- tp->prefix.index_count = MALI_POSITIVE(info->count);
+ tiler_postfix->offset_start = vertex_postfix->offset_start = min_index + info->index_bias;
+ tiler_prefix->offset_bias_correction = -min_index;
+ tiler_prefix->index_count = MALI_POSITIVE(info->count);
draw_flags |= panfrost_translate_index_size(info->index_size);
} else {
- tp->prefix.indices = 0;
+ tiler_prefix->indices = 0;
*vertex_count = ctx->vertex_count;
- tp->offset_start = vp->offset_start = info->start;
- tp->prefix.offset_bias_correction = 0;
- tp->prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
+ tiler_postfix->offset_start = vertex_postfix->offset_start = info->start;
+ tiler_prefix->offset_bias_correction = 0;
+ tiler_prefix->index_count = MALI_POSITIVE(ctx->vertex_count);
}
- tp->prefix.unknown_draw = draw_flags;
+ tiler_prefix->unknown_draw = draw_flags;
/* Encode the padded vertex count */
unsigned shift = __builtin_ctz(ctx->padded_count);
unsigned k = ctx->padded_count >> (shift + 1);
- tp->instance_shift = vp->instance_shift = shift;
- tp->instance_odd = vp->instance_odd = k;
+ tiler_postfix->instance_shift = vertex_postfix->instance_shift = shift;
+ tiler_postfix->instance_odd = vertex_postfix->instance_odd = k;
} else {
*padded_count = *vertex_count;
/* Reset instancing state */
- tp->instance_shift = vp->instance_shift = 0;
- tp->instance_odd = vp->instance_odd = 0;
+ tiler_postfix->instance_shift = vertex_postfix->instance_shift = 0;
+ tiler_postfix->instance_odd = vertex_postfix->instance_odd = 0;
}
}
meta->sampler_count = ctx->sampler_count[st];
if (dev->quirks & IS_BIFROST) {
- meta->bifrost1.unk1 = 0x800200;
+ if (st == PIPE_SHADER_VERTEX)
+ meta->bifrost1.unk1 = 0x800000;
+ else {
+ /* First clause ATEST |= 0x4000000.
+ * Less than 32 regs |= 0x200 */
+ meta->bifrost1.unk1 = 0x958020;
+ }
+
meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
- meta->bifrost2.preload_regs = 0xC0;
+ if (st == PIPE_SHADER_VERTEX)
+ meta->bifrost2.preload_regs = 0xC0;
+ else
+ meta->bifrost2.preload_regs = 0x1;
meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
ss->uniform_cutoff);
} else {
meta->midgard1.flags_lo = 0x220;
meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
}
-
}
static unsigned
hw->max_lod = hw->min_lod + 1;
}
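+
+ /* Bifrost counterpart of the Midgard sampler init above: translate the
+ * gallium sampler CSO into a packed bifrost_sampler_descriptor. */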
+void panfrost_sampler_desc_init_bifrost(const struct pipe_sampler_state *cso,
+ struct bifrost_sampler_descriptor *hw)
+{
+ *hw = (struct bifrost_sampler_descriptor) {
+ .unk1 = 0x1,
+ .wrap_s = translate_tex_wrap(cso->wrap_s),
+ .wrap_t = translate_tex_wrap(cso->wrap_t),
+ .wrap_r = translate_tex_wrap(cso->wrap_r),
+ .unk8 = 0x8,
+ .min_filter = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST,
+ .norm_coords = cso->normalized_coords,
+ .mip_filter = cso->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR,
+ .mag_filter = cso->mag_img_filter == PIPE_TEX_FILTER_LINEAR,
+ .min_lod = FIXED_16(cso->min_lod, false), /* clamp at 0 */
+ .max_lod = FIXED_16(cso->max_lod, false),
+ };
+
+ /* If necessary, we disable mipmapping in the sampler descriptor by
+ * clamping the LOD as tight as possible (from 0 to epsilon,
+ * essentially -- remember these are fixed point numbers, so
+ * epsilon=1/256) */
+
+ if (cso->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
+ hw->max_lod = hw->min_lod + 1;
+}
+
static void
panfrost_make_stencil_state(const struct pipe_stencil_state *in,
struct mali_stencil_test *out)
static void
panfrost_frag_meta_blend_update(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- struct midgard_blend_rt *rts)
+ void *rts)
{
const struct panfrost_device *dev = pan_device(ctx->base.screen);
/* If there is a blend shader, work registers are shared. XXX: opt */
- for (unsigned c = 0; c < rt_count; ++c) {
- if (blend[c].is_shader)
- fragmeta->midgard1.work_count = 16;
+ if (!(dev->quirks & IS_BIFROST)) {
+ for (unsigned c = 0; c < rt_count; ++c) {
+ if (blend[c].is_shader)
+ fragmeta->midgard1.work_count = 16;
+ }
}
/* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
/* Additional blend descriptor tacked on for jobs using MFBD */
for (unsigned i = 0; i < rt_count; ++i) {
- rts[i].flags = 0x200;
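+ /* rts is passed as void * because the per-RT blend descriptor layout
+ * differs between Bifrost and Midgard; cast it per family below. */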
+ if (dev->quirks & IS_BIFROST) {
+ struct bifrost_blend_rt *brts = rts;
+ struct panfrost_shader_state *fs;
+ fs = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+
+ brts[i].flags = 0x200;
+ if (blend[i].is_shader) {
+ /* The blend shader's address needs to have
+ * the same top 32 bits as the fragment shader's.
+ * TODO: Ensure that's always the case.
+ */
+ assert((blend[i].shader.gpu & (0xffffffffull << 32)) ==
+ (fs->bo->gpu & (0xffffffffull << 32)));
+ brts[i].shader = blend[i].shader.gpu;
+ brts[i].unk2 = 0x0;
+ } else {
+ enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
+ const struct util_format_description *format_desc;
+ format_desc = util_format_description(format);
+
+ brts[i].equation = *blend[i].equation.equation;
+
+ /* TODO: this is a bit more complicated */
+ brts[i].constant = blend[i].equation.constant;
+
+ brts[i].format = panfrost_format_to_bifrost_blend(format_desc);
+ brts[i].unk2 = 0x19;
+
+ brts[i].shader_type = fs->blend_types[i];
+ }
+ } else {
+ struct midgard_blend_rt *mrts = rts;
- bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
- (ctx->pipe_framebuffer.cbufs[i]) &&
- util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
+ mrts[i].flags = 0x200;
- SET_BIT(rts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
- SET_BIT(rts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
- SET_BIT(rts[i].flags, MALI_BLEND_SRGB, is_srgb);
- SET_BIT(rts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
+ bool is_srgb = (ctx->pipe_framebuffer.nr_cbufs > i) &&
+ (ctx->pipe_framebuffer.cbufs[i]) &&
+ util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
- if (blend[i].is_shader) {
- rts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
- } else {
- rts[i].blend.equation = *blend[i].equation.equation;
- rts[i].blend.constant = blend[i].equation.constant;
+ SET_BIT(mrts[i].flags, MALI_BLEND_MRT_SHADER, blend[i].is_shader);
+ SET_BIT(mrts[i].flags, MALI_BLEND_LOAD_TIB, !blend[i].no_blending);
+ SET_BIT(mrts[i].flags, MALI_BLEND_SRGB, is_srgb);
+ SET_BIT(mrts[i].flags, MALI_BLEND_NO_DITHER, !ctx->blend->base.dither);
+
+ if (blend[i].is_shader) {
+ mrts[i].blend.shader = blend[i].shader.gpu | blend[i].shader.first_tag;
+ } else {
+ mrts[i].blend.equation = *blend[i].equation.equation;
+ mrts[i].blend.constant = blend[i].equation.constant;
+ }
}
}
}
static void
panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
struct mali_shader_meta *fragmeta,
- struct midgard_blend_rt *rts)
+ void *rts)
{
const struct panfrost_device *dev = pan_device(ctx->base.screen);
struct panfrost_shader_state *fs;
SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
- /* Depending on whether it's legal to in the given shader, we try to
- * enable early-z testing (or forward-pixel kill?) */
+ if (dev->quirks & IS_BIFROST) {
+ /* TODO */
+ } else {
+ /* Depending on whether it's legal to do so in the given shader, we try
+ * to enable early-z testing (or forward-pixel kill?) */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
- !fs->can_discard && !fs->writes_depth);
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
+ !fs->can_discard && !fs->writes_depth);
- /* Add the writes Z/S flags if needed. */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
- SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
+ /* Add the writes Z/S flags if needed. */
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
+ SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
- /* Any time texturing is used, derivatives are implicitly calculated,
- * so we need to enable helper invocations */
+ /* Any time texturing is used, derivatives are implicitly calculated,
+ * so we need to enable helper invocations */
- SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
- fs->helper_invocations);
+ SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
+ fs->helper_invocations);
- /* CAN_DISCARD should be set if the fragment shader possibly contains a
- * 'discard' instruction. It is likely this is related to optimizations
- * related to forward-pixel kill, as per "Mali Performance 3: Is
- * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
+ /* CAN_DISCARD should be set if the fragment shader possibly contains a
+ * 'discard' instruction. It is likely this is related to optimizations
+ * related to forward-pixel kill, as per "Mali Performance 3: Is
+ * EGL_BUFFER_PRESERVED a good thing?" by Peter Harris */
- SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
- SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
+ SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD, fs->can_discard);
+ SET_BIT(fragmeta->midgard1.flags_lo, 0x400, fs->can_discard);
+ }
panfrost_frag_meta_rasterizer_update(ctx, fragmeta);
panfrost_frag_meta_zsa_update(ctx, fragmeta);
void
panfrost_emit_shader_meta(struct panfrost_batch *batch,
enum pipe_shader_type st,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
if (!ss) {
- vtp->postfix.shader = 0;
+ postfix->shader = 0;
return;
}
struct panfrost_device *dev = pan_device(ctx->base.screen);
unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
size_t desc_size = sizeof(meta);
- struct midgard_blend_rt rts[4];
+ void *rts = NULL;
struct panfrost_transfer xfer;
+ unsigned rt_size;
+
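+ /* Size of one blend RT descriptor appended after the shader meta:
+ * none on SFBD, otherwise the family-specific struct. */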
+ if (dev->quirks & MIDGARD_SFBD)
+ rt_size = 0;
+ else if (dev->quirks & IS_BIFROST)
+ rt_size = sizeof(struct bifrost_blend_rt);
+ else
+ rt_size = sizeof(struct midgard_blend_rt);
- assert(rt_count <= ARRAY_SIZE(rts));
+ desc_size += rt_size * rt_count;
- panfrost_frag_shader_meta_init(ctx, &meta, rts);
+ if (rt_size)
+ rts = rzalloc_size(ctx, rt_size * rt_count);
- if (!(dev->quirks & MIDGARD_SFBD))
- desc_size += sizeof(*rts) * rt_count;
+ panfrost_frag_shader_meta_init(ctx, &meta, rts);
xfer = panfrost_allocate_transient(batch, desc_size);
memcpy(xfer.cpu, &meta, sizeof(meta));
- memcpy(xfer.cpu + sizeof(meta), rts, sizeof(*rts) * rt_count);
+ memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
+
+ if (rt_size)
+ ralloc_free(rts);
shader_ptr = xfer.gpu;
} else {
sizeof(meta));
}
- vtp->postfix.shader = shader_ptr;
+ postfix->shader = shader_ptr;
}
static void
void
panfrost_emit_viewport(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_postfix *tiler_postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct mali_viewport mvp;
mvp.viewport1[0] + 1,
mvp.viewport1[1] + 1);
- tp->postfix.viewport = panfrost_upload_transient(batch, &mvp,
- sizeof(mvp));
+ tiler_postfix->viewport = panfrost_upload_transient(batch, &mvp,
+ sizeof(mvp));
}
static mali_ptr
void
panfrost_emit_const_buf(struct panfrost_batch *batch,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_shader_variants *all = ctx->shader[stage];
memcpy(transfer.cpu + sys_size, cpu, uniform_size);
}
- struct mali_vertex_tiler_postfix *postfix = &vtp->postfix;
-
/* Next up, attach UBOs. UBO #0 is the uniforms we just
* uploaded */
PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
panfrost_bo_access_for_stage(st));
- panfrost_batch_add_bo(batch, view->bo,
+ panfrost_batch_add_bo(batch, view->midgard_bo,
PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
panfrost_bo_access_for_stage(st));
- return view->bo->gpu;
+ return view->midgard_bo->gpu;
}
void
panfrost_emit_texture_descriptors(struct panfrost_batch *batch,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
if (!ctx->sampler_view_count[stage])
return;
- uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
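+ /* Bifrost reads an array of inline texture descriptors; Midgard takes a
+ * table of 64-bit pointers ("trampolines") to the descriptors instead. */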
+ if (device->quirks & IS_BIFROST) {
+ struct bifrost_texture_descriptor *descriptors;
- for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
- trampolines[i] = panfrost_get_tex_desc(batch, stage,
- ctx->sampler_views[stage][i]);
+ descriptors = malloc(sizeof(struct bifrost_texture_descriptor) *
+ ctx->sampler_view_count[stage]);
- vtp->postfix.texture_trampoline = panfrost_upload_transient(batch,
- trampolines,
- sizeof(uint64_t) *
- ctx->sampler_view_count[stage]);
+ for (int i = 0; i < ctx->sampler_view_count[stage]; ++i) {
+ struct panfrost_sampler_view *view = ctx->sampler_views[stage][i];
+ struct pipe_sampler_view *pview = &view->base;
+ struct panfrost_resource *rsrc = pan_resource(pview->texture);
+
+ /* Add the BOs to the job so they are retained until the job is done. */
+
+ panfrost_batch_add_bo(batch, rsrc->bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(stage));
+
+ panfrost_batch_add_bo(batch, view->bifrost_bo,
+ PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ |
+ panfrost_bo_access_for_stage(stage));
+
+ memcpy(&descriptors[i], view->bifrost_descriptor, sizeof(*view->bifrost_descriptor));
+ }
+
+ postfix->textures = panfrost_upload_transient(batch,
+ descriptors,
+ sizeof(struct bifrost_texture_descriptor) *
+ ctx->sampler_view_count[stage]);
+
+ free(descriptors);
+ } else {
+ uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+
+ for (int i = 0; i < ctx->sampler_view_count[stage]; ++i)
+ trampolines[i] = panfrost_get_tex_desc(batch, stage,
+ ctx->sampler_views[stage][i]);
+
+ postfix->textures = panfrost_upload_transient(batch,
+ trampolines,
+ sizeof(uint64_t) *
+ ctx->sampler_view_count[stage]);
+ }
}
void
panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
enum pipe_shader_type stage,
- struct midgard_payload_vertex_tiler *vtp)
+ struct mali_vertex_tiler_postfix *postfix)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
if (!ctx->sampler_count[stage])
return;
- size_t desc_size = sizeof(struct mali_sampler_descriptor);
- size_t transfer_size = desc_size * ctx->sampler_count[stage];
- struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
- transfer_size);
- struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
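+ /* As with textures, pick the sampler descriptor layout per GPU family. */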
+ if (device->quirks & IS_BIFROST) {
+ size_t desc_size = sizeof(struct bifrost_sampler_descriptor);
+ size_t transfer_size = desc_size * ctx->sampler_count[stage];
+ struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ transfer_size);
+ struct bifrost_sampler_descriptor *desc = (struct bifrost_sampler_descriptor *)transfer.cpu;
+
+ for (int i = 0; i < ctx->sampler_count[stage]; ++i)
+ desc[i] = ctx->samplers[stage][i]->bifrost_hw;
- for (int i = 0; i < ctx->sampler_count[stage]; ++i)
- desc[i] = ctx->samplers[stage][i]->hw;
+ postfix->sampler_descriptor = transfer.gpu;
+ } else {
+ size_t desc_size = sizeof(struct mali_sampler_descriptor);
+ size_t transfer_size = desc_size * ctx->sampler_count[stage];
+ struct panfrost_transfer transfer = panfrost_allocate_transient(batch,
+ transfer_size);
+ struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *)transfer.cpu;
- vtp->postfix.sampler_descriptor = transfer.gpu;
+ for (int i = 0; i < ctx->sampler_count[stage]; ++i)
+ desc[i] = ctx->samplers[stage][i]->midgard_hw;
+
+ postfix->sampler_descriptor = transfer.gpu;
+ }
}
void
panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp)
+ struct mali_vertex_tiler_postfix *vertex_postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
- panfrost_vertex_state_upd_attr_offs(ctx, vp);
- vp->postfix.attribute_meta = panfrost_upload_transient(batch, so->hw,
+ panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
+ vertex_postfix->attribute_meta = panfrost_upload_transient(batch, so->hw,
sizeof(*so->hw) *
PAN_MAX_ATTRIBUTE);
}
void
panfrost_emit_vertex_data(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp)
+ struct mali_vertex_tiler_postfix *vertex_postfix)
{
struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
/* Normal, non-instanced attributes */
attrs[k++].elements |= MALI_ATTR_LINEAR;
} else {
- unsigned instance_shift = vp->instance_shift;
- unsigned instance_odd = vp->instance_odd;
+ unsigned instance_shift = vertex_postfix->instance_shift;
+ unsigned instance_odd = vertex_postfix->instance_odd;
k += panfrost_vertex_instanced(ctx->padded_count,
instance_shift,
/* Upload whatever we emitted and go */
- vp->postfix.attributes = panfrost_upload_transient(batch, attrs,
+ vertex_postfix->attributes = panfrost_upload_transient(batch, attrs,
k * sizeof(*attrs));
}
void
panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
unsigned vertex_count,
- struct midgard_payload_vertex_tiler *vp,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
+ union midgard_primitive_size *primitive_size)
{
/* Load the shaders */
struct panfrost_context *ctx = batch->ctx;
/* fp32 vec4 gl_Position */
varyings_p = panfrost_emit_varyings(batch, &varyings[gl_Position],
sizeof(float) * 4, vertex_count);
- tp->postfix.position_varying = varyings_p;
+ tiler_postfix->position_varying = varyings_p;
if (panfrost_writes_point_size(ctx)) {
varyings_p = panfrost_emit_varyings(batch,
&varyings[gl_PointSize],
2, vertex_count);
- tp->primitive_size.pointer = varyings_p;
+ primitive_size->pointer = varyings_p;
}
if (reads_point_coord)
if (fs->reads_frag_coord)
varyings[gl_FragCoord].elements = MALI_VARYING_FRAG_COORD;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
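+ /* Point coord varyings are not handled for Bifrost in this path, so make
+ * sure we never get here needing them. */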
+ assert(!(device->quirks & IS_BIFROST) || !(reads_point_coord));
+
/* Let's go ahead and link varying meta to the buffer in question, now
* that that information is available. VARYING_SLOT_POS is mapped to
* gl_FragCoord for fragment shaders but gl_Positionf or vertex shaders
/* Set the type appropriately. TODO: Integer varyings XXX */
assert(o->stream == 0);
ovs[i].format = pan_xfb_format(o->num_components);
- ovs[i].swizzle = panfrost_get_default_swizzle(o->num_components);
+
+ if (device->quirks & HAS_SWIZZLES)
+ ovs[i].swizzle = panfrost_get_default_swizzle(o->num_components);
+ else
+ ovs[i].swizzle = panfrost_bifrost_swizzle(o->num_components);
/* Link to the fragment */
signed fs_idx = -1;
varyings_p = panfrost_upload_transient(batch, varyings,
idx * sizeof(*varyings));
- vp->postfix.varyings = varyings_p;
- tp->postfix.varyings = varyings_p;
+ vertex_postfix->varyings = varyings_p;
+ tiler_postfix->varyings = varyings_p;
- vp->postfix.varying_meta = trans.gpu;
- tp->postfix.varying_meta = trans.gpu + vs_size;
+ vertex_postfix->varying_meta = trans.gpu;
+ tiler_postfix->varying_meta = trans.gpu + vs_size;
}
void
panfrost_emit_vertex_tiler_jobs(struct panfrost_batch *batch,
- struct midgard_payload_vertex_tiler *vp,
- struct midgard_payload_vertex_tiler *tp)
+ struct mali_vertex_tiler_prefix *vertex_prefix,
+ struct mali_vertex_tiler_postfix *vertex_postfix,
+ struct mali_vertex_tiler_prefix *tiler_prefix,
+ struct mali_vertex_tiler_postfix *tiler_postfix,
+ union midgard_primitive_size *primitive_size)
{
struct panfrost_context *ctx = batch->ctx;
+ struct panfrost_device *device = pan_device(ctx->base.screen);
bool wallpapering = ctx->wallpaper_batch && batch->tiler_dep;
+ struct bifrost_payload_vertex bifrost_vertex = {0,};
+ struct bifrost_payload_tiler bifrost_tiler = {0,};
+ struct midgard_payload_vertex_tiler midgard_vertex = {0,};
+ struct midgard_payload_vertex_tiler midgard_tiler = {0,};
+ void *vp, *tp;
+ size_t vp_size, tp_size;
+
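+ /* Build the family-specific vertex/tiler payloads on the stack, then point
+ * vp/tp at whichever pair matches the GPU for submission below. */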
+ if (device->quirks & IS_BIFROST) {
+ bifrost_vertex.prefix = *vertex_prefix;
+ bifrost_vertex.postfix = *vertex_postfix;
+ vp = &bifrost_vertex;
+ vp_size = sizeof(bifrost_vertex);
+
+ bifrost_tiler.prefix = *tiler_prefix;
+ bifrost_tiler.tiler.primitive_size = *primitive_size;
+ bifrost_tiler.tiler.tiler_meta = panfrost_batch_get_tiler_meta(batch, ~0);
+ bifrost_tiler.postfix = *tiler_postfix;
+ tp = &bifrost_tiler;
+ tp_size = sizeof(bifrost_tiler);
+ } else {
+ midgard_vertex.prefix = *vertex_prefix;
+ midgard_vertex.postfix = *vertex_postfix;
+ vp = &midgard_vertex;
+ vp_size = sizeof(midgard_vertex);
+
+ midgard_tiler.prefix = *tiler_prefix;
+ midgard_tiler.postfix = *tiler_postfix;
+ midgard_tiler.primitive_size = *primitive_size;
+ tp = &midgard_tiler;
+ tp_size = sizeof(midgard_tiler);
+ }
if (wallpapering) {
/* Inject in reverse order, with "predicted" job indices.
* THIS IS A HACK XXX */
panfrost_new_job(batch, JOB_TYPE_TILER, false,
- batch->job_index + 2, tp, sizeof(*tp), true);
+ batch->job_index + 2, tp, tp_size, true);
panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
- vp, sizeof(*vp), true);
+ vp, vp_size, true);
return;
}
ctx->rasterizer->base.rasterizer_discard;
unsigned vertex = panfrost_new_job(batch, JOB_TYPE_VERTEX, false, 0,
- vp, sizeof(*vp), false);
+ vp, vp_size, false);
if (rasterizer_discard)
return;
- panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, sizeof(*tp),
+ panfrost_new_job(batch, JOB_TYPE_TILER, false, vertex, tp, tp_size,
false);
}
+
+/* TODO: stop hardcoding this */
+mali_ptr
+panfrost_emit_sample_locations(struct panfrost_batch *batch)
+{
+ uint16_t locations[] = {
+ 128, 128,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 0, 256,
+ 128, 128,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ };
+
+ return panfrost_upload_transient(batch, locations, 96 * sizeof(uint16_t));
+}