panfrost: Use preuploaded shader descriptors
src/gallium/drivers/panfrost/pan_cmdstream.c
index 7e5772f8ca3a6e5b509728a94f5469d3295723a7..801a39216f224462e8117ee1971deba345893289 100644
@@ -306,55 +306,6 @@ panfrost_vt_set_draw_info(struct panfrost_context *ctx,
         }
 }
 
-static void
-panfrost_shader_meta_init(struct panfrost_context *ctx,
-                          enum pipe_shader_type st,
-                          struct mali_shader_meta *meta)
-{
-        const struct panfrost_device *dev = pan_device(ctx->base.screen);
-        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
-
-        memset(meta, 0, sizeof(*meta));
-        meta->shader = (ss->bo ? ss->bo->gpu : 0) | ss->first_tag;
-        meta->attribute_count = ss->attribute_count;
-        meta->varying_count = ss->varying_count;
-        meta->texture_count = ctx->sampler_view_count[st];
-        meta->sampler_count = ctx->sampler_count[st];
-
-        if (dev->quirks & IS_BIFROST) {
-                if (st == PIPE_SHADER_VERTEX)
-                        meta->bifrost1.unk1 = 0x800000;
-                else {
-                        /* First clause ATEST |= 0x4000000.
-                         * Less than 32 regs |= 0x200 */
-                        meta->bifrost1.unk1 = 0x950020;
-                }
-
-                meta->bifrost1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
-                if (st == PIPE_SHADER_VERTEX)
-                        meta->bifrost2.preload_regs = 0xC0;
-                else {
-                        meta->bifrost2.preload_regs = 0x1;
-                        SET_BIT(meta->bifrost2.preload_regs, 0x10, ss->reads_frag_coord);
-                }
-
-                meta->bifrost2.uniform_count = MIN2(ss->uniform_count,
-                                                    ss->uniform_cutoff);
-        } else {
-                meta->midgard1.uniform_count = MIN2(ss->uniform_count,
-                                                    ss->uniform_cutoff);
-                meta->midgard1.work_count = ss->work_reg_count;
-
-                /* TODO: This is not conformant on ES3 */
-                meta->midgard1.flags_hi = MALI_SUPPRESS_INF_NAN;
-
-                meta->midgard1.flags_lo = 0x20;
-                meta->midgard1.uniform_buffer_count = panfrost_ubo_count(ctx, st);
-
-                SET_BIT(meta->midgard1.flags_lo, MALI_WRITES_GLOBAL, ss->writes_global);
-        }
-}
-
 static unsigned
 translate_tex_wrap(enum pipe_tex_wrap w)
 {
@@ -556,7 +507,7 @@ panfrost_emit_blend(struct panfrost_batch *batch, void *rts,
 }
 
 static void
-panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
+panfrost_emit_frag_shader(struct panfrost_context *ctx,
                                struct mali_shader_meta *fragmeta,
                                struct panfrost_blend_final *blend)
 {
@@ -567,56 +518,97 @@ panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
 
         struct pipe_rasterizer_state *rast = &ctx->rasterizer->base;
         const struct panfrost_zsa_state *zsa = ctx->depth_stencil;
+        unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
 
-        bool msaa = rast->multisample;
-        fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
+        memset(fragmeta, 0, sizeof(*fragmeta));
+        memcpy(&fragmeta->shader, &fs->shader, sizeof(fs->shader));
 
-        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
-        fragmeta->unknown2_4 = 0x4e0;
+        if (!panfrost_fs_required(fs, blend, rt_count)) {
+                struct mali_shader_packed shader = { 0 };
+                struct mali_midgard_properties_packed prop;
 
-        /* unknown2_4 has 0x10 bit set on T6XX and T720. We don't know why this
-         * is required (independent of 32-bit/64-bit descriptors), or why it's
-         * not used on later GPU revisions. Otherwise, all shader jobs fault on
-         * these earlier chips (perhaps this is a chicken bit of some kind).
-         * More investigation is needed. */
+                if (dev->quirks & IS_BIFROST) {
+                        struct mali_preload_packed preload = { 0 };
+                        memcpy(&fragmeta->bifrost_preload, &preload, sizeof(preload));
 
-        SET_BIT(fragmeta->unknown2_4, 0x10, dev->quirks & MIDGARD_SFBD);
+                        pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
+                                cfg.unknown = 0x950020; /* XXX */
+                                cfg.early_z_enable = true;
+                        }
+                } else {
+                        pan_pack(&shader, SHADER, cfg) {
+                                cfg.shader = 0x1;
+                        }
 
-        if (dev->quirks & IS_BIFROST) {
-                /* TODO */
-        } else {
-                /* Depending on whether it's legal to in the given shader, we try to
-                 * enable early-z testing. TODO: respect e-z force */
+                        pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
+                                cfg.work_register_count = 1;
+                                cfg.depth_source = MALI_DEPTH_SOURCE_FIXED_FUNCTION;
+                                cfg.early_z_enable = true;
+                        }
+                }
 
-                SET_BIT(fragmeta->midgard1.flags_lo, MALI_EARLY_Z,
-                        !fs->can_discard && !fs->writes_global &&
-                        !fs->writes_depth && !fs->writes_stencil &&
-                        !ctx->blend->base.alpha_to_coverage);
+                memcpy(&fragmeta->shader, &shader, sizeof(shader));
+                memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
+        } else if (dev->quirks & IS_BIFROST) {
+                struct mali_bifrost_properties_packed prop;
 
-                /* Add the writes Z/S flags if needed. */
-                SET_BIT(fragmeta->midgard1.flags_lo, MALI_WRITES_Z, fs->writes_depth);
-                SET_BIT(fragmeta->midgard1.flags_hi, MALI_WRITES_S, fs->writes_stencil);
+                bool no_blend = true;
 
-                /* Any time texturing is used, derivatives are implicitly calculated,
-                 * so we need to enable helper invocations */
+                for (unsigned i = 0; i < rt_count; ++i)
+                        no_blend &= (!blend[i].load_dest | blend[i].no_colour);
 
-                SET_BIT(fragmeta->midgard1.flags_lo, MALI_HELPER_INVOCATIONS,
-                        fs->helper_invocations);
+                pan_pack(&prop, BIFROST_PROPERTIES, cfg) {
+                        cfg.early_z_enable = !fs->can_discard && !fs->writes_depth && no_blend;
+                }
+
+                /* Combine with prepacked properties */
+                prop.opaque[0] |= fs->properties.opaque[0];
 
-                /* If discard is enabled, which bit we set to convey this
-                 * depends on if depth/stencil is used for the draw or not.
-                 * Just one of depth OR stencil is enough to trigger this. */
+                memcpy(&fragmeta->bifrost_props, &prop, sizeof(prop));
+                memcpy(&fragmeta->bifrost_preload, &fs->preload, sizeof(fs->preload));
+        } else {
+                struct mali_midgard_properties_packed prop;
 
+                /* Reasons to disable early-Z from a shader perspective */
+                bool late_z = fs->can_discard || fs->writes_global ||
+                        fs->writes_depth || fs->writes_stencil;
+
+                /* Reasons to disable early-Z from a CSO perspective */
+                bool alpha_to_coverage = ctx->blend->base.alpha_to_coverage;
+
+                /* If either depth or stencil is enabled, discard matters */
                 bool zs_enabled =
-                        fs->writes_depth || fs->writes_stencil ||
                         (zsa->base.depth.enabled && zsa->base.depth.func != PIPE_FUNC_ALWAYS) ||
                         zsa->base.stencil[0].enabled;
 
-                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_TILEBUFFER,
-                        fs->outputs_read || (!zs_enabled && fs->can_discard));
-                SET_BIT(fragmeta->midgard1.flags_lo, MALI_READS_ZS, zs_enabled && fs->can_discard);
+                bool has_blend_shader = false;
+
+                for (unsigned c = 0; c < rt_count; ++c)
+                        has_blend_shader |= blend[c].is_shader;
+
+                pan_pack(&prop, MIDGARD_PROPERTIES, cfg) {
+                        /* TODO: Reduce this limit? */
+                        if (has_blend_shader)
+                                cfg.work_register_count = MAX2(fs->work_reg_count, 8);
+                        else
+                                cfg.work_register_count = fs->work_reg_count;
+
+                        cfg.early_z_enable = !(late_z || alpha_to_coverage);
+                        cfg.reads_tilebuffer = fs->outputs_read || (!zs_enabled && fs->can_discard);
+                        cfg.reads_depth_stencil = zs_enabled && fs->can_discard;
+                }
+
+                /* Combine with prepacked properties */
+                prop.opaque[0] |= fs->properties.opaque[0];
+                memcpy(&fragmeta->midgard_props, &prop, sizeof(prop));
         }
 
+        bool msaa = rast->multisample;
+        fragmeta->coverage_mask = msaa ? ctx->sample_mask : ~0;
+
+        fragmeta->unknown2_3 = MALI_DEPTH_FUNC(MALI_FUNC_ALWAYS) | 0x10;
+        fragmeta->unknown2_4 = 0x4e0;
+
         /* TODO: Sample size */
         SET_BIT(fragmeta->unknown2_3, MALI_HAS_MSAA, msaa);
         SET_BIT(fragmeta->unknown2_4, MALI_NO_MSAA, !msaa);
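For reference, the early-Z decision packed into MIDGARD_PROPERTIES above condenses to the predicate below. This is a standalone restatement of the same logic (a sketch only; the hypothetical helper takes the shader and CSO facts as plain booleans instead of reading panfrost_shader_state and the blend CSO).

    #include <stdbool.h>

    /* Early-Z is only safe when the shader cannot alter coverage or Z/S
     * after the fixed-function test would have run, and when coverage is
     * not derived from the shaded alpha (alpha-to-coverage). */
    static bool
    midgard_can_early_z(bool can_discard, bool writes_global,
                        bool writes_depth, bool writes_stencil,
                        bool alpha_to_coverage)
    {
            bool late_z = can_discard || writes_global ||
                          writes_depth || writes_stencil;

            return !(late_z || alpha_to_coverage);
    }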
@@ -660,160 +652,124 @@ panfrost_frag_shader_meta_init(struct panfrost_context *ctx,
         fragmeta->unknown2_3 |= MALI_DEPTH_FUNC(panfrost_translate_compare_func(
                 zsa->base.depth.enabled ? zsa->base.depth.func : PIPE_FUNC_ALWAYS));
 
-        SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER,
-                (dev->quirks & MIDGARD_SFBD) && ctx->blend &&
-                !ctx->blend->base.dither);
-
         SET_BIT(fragmeta->unknown2_4, MALI_ALPHA_TO_COVERAGE,
                         ctx->blend->base.alpha_to_coverage);
 
-        /* Get blending setup */
-        unsigned rt_count = ctx->pipe_framebuffer.nr_cbufs;
-
-        /* Disable shader execution if we can */
-        if (dev->quirks & MIDGARD_SHADERLESS
-                        && !panfrost_fs_required(fs, blend, rt_count)) {
-                fragmeta->shader = 0;
-                fragmeta->attribute_count = 0;
-                fragmeta->varying_count = 0;
-                fragmeta->texture_count = 0;
-                fragmeta->sampler_count = 0;
-
-                /* This feature is not known to work on Bifrost */
-                fragmeta->midgard1.work_count = 1;
-                fragmeta->midgard1.uniform_count = 0;
-                fragmeta->midgard1.uniform_buffer_count = 0;
-        }
-
-         /* If there is a blend shader, work registers are shared. We impose 8
-          * work registers as a limit for blend shaders. Should be lower XXX */
-
-        if (!(dev->quirks & IS_BIFROST)) {
-                for (unsigned c = 0; c < rt_count; ++c) {
-                        if (blend[c].is_shader) {
-                                fragmeta->midgard1.work_count =
-                                        MAX2(fragmeta->midgard1.work_count, 8);
-                        }
-                }
-        }
-
-        /* Even on MFBD, the shader descriptor gets blend shaders. It's *also*
-         * copied to the blend_meta appended (by convention), but this is the
-         * field actually read by the hardware. (Or maybe both are read...?).
-         * Specify the last RTi with a blend shader. */
-
-        fragmeta->blend.shader = 0;
-
-        for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
-                if (!blend[rt].is_shader)
-                        continue;
-
-                fragmeta->blend.shader = blend[rt].shader.gpu |
-                                         blend[rt].shader.first_tag;
-                break;
-        }
-
         if (dev->quirks & MIDGARD_SFBD) {
                 /* When only a single render target platform is used, the blend
                  * information is inside the shader meta itself. We additionally
                  * need to signal CAN_DISCARD for nontrivial blend modes (so
                  * we're able to read back the destination buffer) */
 
+                if (blend[0].no_colour)
+                        return;
+
+                fragmeta->unknown2_4 |= MALI_SFBD_ENABLE;
+
+                SET_BIT(fragmeta->unknown2_4, MALI_SFBD_SRGB,
+                                util_format_is_srgb(ctx->pipe_framebuffer.cbufs[0]->format));
+
                 SET_BIT(fragmeta->unknown2_3, MALI_HAS_BLEND_SHADER,
                         blend[0].is_shader);
 
-                if (!blend[0].is_shader) {
+                if (blend[0].is_shader) {
+                        fragmeta->blend.shader = blend[0].shader.gpu |
+                                blend[0].shader.first_tag;
+                } else {
                         fragmeta->blend.equation = blend[0].equation.equation;
                         fragmeta->blend.constant = blend[0].equation.constant;
                 }
 
                 SET_BIT(fragmeta->unknown2_3, MALI_CAN_DISCARD,
                         blend[0].load_dest);
-        }
 
-        if (dev->quirks & IS_BIFROST) {
-                bool no_blend = true;
+                SET_BIT(fragmeta->unknown2_4, MALI_NO_DITHER, !ctx->blend->base.dither);
+        } else if (!(dev->quirks & IS_BIFROST)) {
+                /* Bug where MRT-capable hw apparently reads the last blend
+                 * shader from here instead of the usual location? */
 
-                for (unsigned i = 0; i < rt_count; ++i)
-                        no_blend &= (!blend[i].load_dest | blend[i].no_colour);
+                for (signed rt = ((signed) rt_count - 1); rt >= 0; --rt) {
+                        if (!blend[rt].is_shader)
+                                continue;
 
-                SET_BIT(fragmeta->bifrost1.unk1, MALI_BIFROST_EARLY_Z,
-                        !fs->can_discard && !fs->writes_depth && no_blend);
+                        fragmeta->blend.shader = blend[rt].shader.gpu |
+                                                 blend[rt].shader.first_tag;
+                        break;
+                }
         }
 }
 
-void
-panfrost_emit_shader_meta(struct panfrost_batch *batch,
-                          enum pipe_shader_type st,
-                          struct mali_vertex_tiler_postfix *postfix)
+mali_ptr
+panfrost_emit_compute_shader_meta(struct panfrost_batch *batch, enum pipe_shader_type stage)
 {
-        struct panfrost_context *ctx = batch->ctx;
-        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, st);
+        struct panfrost_shader_state *ss = panfrost_get_shader_state(batch->ctx, stage);
 
-        if (!ss) {
-                postfix->shader = 0;
-                return;
-        }
+        panfrost_batch_add_bo(batch, ss->bo,
+                              PAN_BO_ACCESS_PRIVATE |
+                              PAN_BO_ACCESS_READ |
+                              PAN_BO_ACCESS_VERTEX_TILER);
 
-        struct mali_shader_meta meta;
+        panfrost_batch_add_bo(batch, pan_resource(ss->upload.rsrc)->bo,
+                              PAN_BO_ACCESS_PRIVATE |
+                              PAN_BO_ACCESS_READ |
+                              PAN_BO_ACCESS_VERTEX_TILER);
 
-        panfrost_shader_meta_init(ctx, st, &meta);
+        return pan_resource(ss->upload.rsrc)->bo->gpu + ss->upload.offset;
+}
+
+mali_ptr
+panfrost_emit_frag_shader_meta(struct panfrost_batch *batch)
+{
+        struct panfrost_context *ctx = batch->ctx;
+        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);
+        struct mali_shader_meta meta;
 
         /* Add the shader BO to the batch. */
         panfrost_batch_add_bo(batch, ss->bo,
                               PAN_BO_ACCESS_PRIVATE |
                               PAN_BO_ACCESS_READ |
-                              panfrost_bo_access_for_stage(st));
-
-        mali_ptr shader_ptr;
-
-        if (st == PIPE_SHADER_FRAGMENT) {
-                struct panfrost_device *dev = pan_device(ctx->base.screen);
-                unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
-                size_t desc_size = sizeof(meta);
-                void *rts = NULL;
-                struct panfrost_transfer xfer;
-                unsigned rt_size;
-
-                if (dev->quirks & MIDGARD_SFBD)
-                        rt_size = 0;
-                else if (dev->quirks & IS_BIFROST)
-                        rt_size = sizeof(struct bifrost_blend_rt);
-                else
-                        rt_size = sizeof(struct midgard_blend_rt);
+                              PAN_BO_ACCESS_FRAGMENT);
 
-                desc_size += rt_size * rt_count;
+        struct panfrost_device *dev = pan_device(ctx->base.screen);
+        unsigned rt_count = MAX2(ctx->pipe_framebuffer.nr_cbufs, 1);
+        size_t desc_size = sizeof(meta);
+        void *rts = NULL;
+        struct panfrost_transfer xfer;
+        unsigned rt_size;
+
+        if (dev->quirks & MIDGARD_SFBD)
+                rt_size = 0;
+        else if (dev->quirks & IS_BIFROST)
+                rt_size = sizeof(struct bifrost_blend_rt);
+        else
+                rt_size = sizeof(struct midgard_blend_rt);
 
-                if (rt_size)
-                        rts = rzalloc_size(ctx, rt_size * rt_count);
+        desc_size += rt_size * rt_count;
 
-                struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
+        if (rt_size)
+                rts = rzalloc_size(ctx, rt_size * rt_count);
 
-                for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
-                        blend[c] = panfrost_get_blend_for_context(ctx, c);
+        struct panfrost_blend_final blend[PIPE_MAX_COLOR_BUFS];
 
-                panfrost_frag_shader_meta_init(ctx, &meta, blend);
+        for (unsigned c = 0; c < ctx->pipe_framebuffer.nr_cbufs; ++c)
+                blend[c] = panfrost_get_blend_for_context(ctx, c);
 
-                if (!(dev->quirks & MIDGARD_SFBD))
-                        panfrost_emit_blend(batch, rts, blend);
-                else
-                        batch->draws |= PIPE_CLEAR_COLOR0;
+        panfrost_emit_frag_shader(ctx, &meta, blend);
 
-                xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
+        if (!(dev->quirks & MIDGARD_SFBD))
+                panfrost_emit_blend(batch, rts, blend);
+        else
+                batch->draws |= PIPE_CLEAR_COLOR0;
 
-                memcpy(xfer.cpu, &meta, sizeof(meta));
-                memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
+        xfer = panfrost_pool_alloc_aligned(&batch->pool, desc_size, sizeof(meta));
 
-                if (rt_size)
-                        ralloc_free(rts);
+        memcpy(xfer.cpu, &meta, sizeof(meta));
+        memcpy(xfer.cpu + sizeof(meta), rts, rt_size * rt_count);
 
-                shader_ptr = xfer.gpu;
-        } else {
-                shader_ptr = panfrost_pool_upload(&batch->pool, &meta,
-                                                       sizeof(meta));
-        }
+        if (rt_size)
+                ralloc_free(rts);
 
-        postfix->shader = shader_ptr;
+        return xfer.gpu;
 }
 
 void
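The fragment path above emits one contiguous pool allocation: the shader meta followed immediately by one blend record per render target, with the record size chosen by GPU family. A hypothetical helper, shown only to make the size computation explicit (the real code inlines it as desc_size above):

    /* Layout of the allocation made in panfrost_emit_frag_shader_meta:
     *
     *   [ struct mali_shader_meta ][ blend RT 0 ] ... [ blend RT n-1 ]
     *
     * SFBD parts keep blend state inside the shader meta itself, so their
     * per-RT record size is zero. */
    static size_t
    frag_shader_desc_size(const struct panfrost_device *dev, unsigned rt_count)
    {
            size_t rt_size;

            if (dev->quirks & MIDGARD_SFBD)
                    rt_size = 0;
            else if (dev->quirks & IS_BIFROST)
                    rt_size = sizeof(struct bifrost_blend_rt);
            else
                    rt_size = sizeof(struct midgard_blend_rt);

            return sizeof(struct mali_shader_meta) + rt_size * rt_count;
    }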
@@ -1104,10 +1060,10 @@ panfrost_emit_const_buf(struct panfrost_batch *batch,
         }
 
         /* Next up, attach UBOs. UBO #0 is the uniforms we just
-         * uploaded */
+         * uploaded, so it's always included. The count is the highest UBO
+         * addressable -- gaps are included. */
 
-        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
-        assert(ubo_count >= 1);
+        unsigned ubo_count = 32 - __builtin_clz(buf->enabled_mask | 1);
 
         size_t sz = MALI_UNIFORM_BUFFER_LENGTH * ubo_count;
         struct panfrost_transfer ubos =
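A worked example of the count computed above: the descriptor array has to cover every addressable UBO slot up to the highest one bound, gaps included, and UBO #0 (the uniform buffer emitted just before) must always be present. The "| 1" both forces that and keeps __builtin_clz away from its undefined zero-input case. The wrapper name below is illustrative only.

    #include <assert.h>

    /* Same expression as the ubo_count computation above. */
    static unsigned
    addressable_ubo_count(unsigned enabled_mask)
    {
            return 32 - __builtin_clz(enabled_mask | 1);
    }

    /* enabled_mask = 0b1001 (UBOs 0 and 3 bound) -> count 4, so the empty
     * slots 1 and 2 are still described; an empty mask still yields 1. */
    static void
    addressable_ubo_count_examples(void)
    {
            assert(addressable_ubo_count(0x9) == 4);
            assert(addressable_ubo_count(0x0) == 1);
            assert(addressable_ubo_count(0x1) == 1);
    }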