panfrost: Use pack for general varying
[mesa.git] / src / gallium / drivers / panfrost / pan_cmdstream.c
index 31c19679b6a80f3c6765b0f1ec7b1ec34bf49848..376705fc844ceec6963debf1d2bb978dce500165 100644 (file)
@@ -1338,48 +1338,41 @@ panfrost_emit_sampler_descriptors(struct panfrost_batch *batch,
 }
 
 void
-panfrost_emit_vertex_attr_meta(struct panfrost_batch *batch,
-                               struct mali_vertex_tiler_postfix *vertex_postfix)
+panfrost_emit_vertex_data(struct panfrost_batch *batch,
+                          struct mali_vertex_tiler_postfix *vertex_postfix)
 {
         struct panfrost_context *ctx = batch->ctx;
+        struct panfrost_vertex_state *so = ctx->vertex;
 
-        if (!ctx->vertex)
-                return;
+        unsigned instance_shift = vertex_postfix->instance_shift;
+        unsigned instance_odd = vertex_postfix->instance_odd;
 
-        struct panfrost_vertex_state *so = ctx->vertex;
+        /* Worst case: every attribute buffer needs an NPOT divisor
+         * continuation record, so allocate two records per attribute */
 
-        panfrost_vertex_state_upd_attr_offs(ctx, vertex_postfix);
-        vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
-                                                               sizeof(*so->hw) *
-                                                               PAN_MAX_ATTRIBUTE);
-}
+        struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
+                        MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
 
-void
-panfrost_emit_vertex_data(struct panfrost_batch *batch,
-                          struct mali_vertex_tiler_postfix *vertex_postfix)
-{
-        struct panfrost_context *ctx = batch->ctx;
-        struct panfrost_vertex_state *so = ctx->vertex;
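+        /* The attribute descriptor array covers every slot, including the
+         * gl_VertexID/gl_InstanceID specials at the end */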
+        struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+                        MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
+
+        struct mali_attribute_buffer_packed *bufs =
+                (struct mali_attribute_buffer_packed *) S.cpu;
 
-        /* Staged mali_attr, and index into them. i =/= k, depending on the
-         * vertex buffer mask and instancing. Twice as much room is allocated,
-         * for a worst case of NPOT_DIVIDEs which take up extra slot */
-        union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
+        struct mali_attribute_packed *out =
+                (struct mali_attribute_packed *) T.cpu;
+
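+        /* Remember which buffer record each attribute points at, for the
+         * descriptor pass below */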
+        unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
         unsigned k = 0;
 
         for (unsigned i = 0; i < so->num_elements; ++i) {
-                /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
+                /* We map buffers 1:1 with the attributes, which
                  * means duplicating some vertex buffers (who cares? aside from
                  * maybe some caching implications but I somehow doubt that
                  * matters) */
 
                 struct pipe_vertex_element *elem = &so->pipe[i];
                 unsigned vbi = elem->vertex_buffer_index;
-
-                /* The exception to 1:1 mapping is that we can have multiple
-                 * entries (NPOT divisors), so we fixup anyways */
-
-                so->hw[i].index = k;
+                attrib_to_buffer[i] = k;
 
                 if (!(ctx->vb_mask & (1 << vbi)))
                         continue;
@@ -1391,75 +1384,135 @@ panfrost_emit_vertex_data(struct panfrost_batch *batch,
                 if (!rsrc)
                         continue;
 
-                /* Align to 64 bytes by masking off the lower bits. This
-                 * will be adjusted back when we fixup the src_offset in
-                 * mali_attr_meta */
-
-                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
-                mali_ptr addr = raw_addr & ~63;
-                unsigned chopped_addr = raw_addr - addr;
-
                 /* Add a dependency of the batch on the vertex buffer */
                 panfrost_batch_add_bo(batch, rsrc->bo,
                                       PAN_BO_ACCESS_SHARED |
                                       PAN_BO_ACCESS_READ |
                                       PAN_BO_ACCESS_VERTEX_TILER);
 
-                /* Set common fields */
-                attrs[k].elements = addr;
-                attrs[k].stride = buf->stride;
+                /* Align to 64 bytes by masking off the low bits; see the
+                 * offset fixup below */
+                mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
+                mali_ptr addr = raw_addr & ~63;
 
                 /* Since we advanced the base pointer, we shrink the buffer
-                 * size */
-                attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
+                 * size, but add the offset we subtracted */
+                unsigned size = rsrc->base.width0 + (raw_addr - addr)
+                        - buf->buffer_offset;
 
-                /* We need to add the extra size we masked off (for
-                 * correctness) so the data doesn't get clamped away */
-                attrs[k].size += chopped_addr;
+                /* When there is a divisor, the hardware-level divisor is
+                 * the product of the instance divisor and the padded count */
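+                /* (e.g. a padded count of 4 with an instance divisor of 3
+                 * gives a hardware divisor of 12) */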
+                unsigned divisor = elem->instance_divisor;
+                unsigned hw_divisor = ctx->padded_count * divisor;
+                unsigned stride = buf->stride;
 
-                /* For non-instancing make sure we initialize */
-                attrs[k].shift = attrs[k].extra_flags = 0;
+                /* If there's a divisor but no real instancing, we want every
+                 * instance to read the same attribute data, so force the
+                 * stride to zero */
 
-                /* Instancing uses a dramatically different code path than
-                 * linear, so dispatch for the actual emission now that the
-                 * common code is finished */
+                if (divisor && ctx->instance_count == 1)
+                        stride = 0;
 
-                unsigned divisor = elem->instance_divisor;
+                if (!divisor || ctx->instance_count <= 1) {
+                        pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+                                if (ctx->instance_count > 1)
+                                        cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
+
+                                cfg.pointer = addr;
+                                cfg.stride = stride;
+                                cfg.size = size;
+                                cfg.divisor_r = instance_shift;
+                                cfg.divisor_p = instance_odd;
+                        }
+                } else if (util_is_power_of_two_or_zero(hw_divisor)) {
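+                        /* Power-of-two divisors are encoded as a shift, e.g.
+                         * a hardware divisor of 8 packs as divisor_r = 3 */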
+                        pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+                                cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
+                                cfg.pointer = addr;
+                                cfg.stride = stride;
+                                cfg.size = size;
+                                cfg.divisor_r = __builtin_ctz(hw_divisor);
+                        }
 
-                if (divisor && ctx->instance_count == 1) {
-                        /* Silly corner case where there's a divisor(=1) but
-                         * there's no legitimate instancing. So we want *every*
-                         * attribute to be the same. So set stride to zero so
-                         * we don't go anywhere. */
-
-                        attrs[k].size = attrs[k].stride + chopped_addr;
-                        attrs[k].stride = 0;
-                        attrs[k++].elements |= MALI_ATTR_LINEAR;
-                } else if (ctx->instance_count <= 1) {
-                        /* Normal, non-instanced attributes */
-                        attrs[k++].elements |= MALI_ATTR_LINEAR;
                 } else {
-                        unsigned instance_shift = vertex_postfix->instance_shift;
-                        unsigned instance_odd = vertex_postfix->instance_odd;
+                        unsigned shift = 0, extra_flags = 0;
+
+                        unsigned magic_divisor =
+                                panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
+
+                        pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+                                cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
+                                cfg.pointer = addr;
+                                cfg.stride = stride;
+                                cfg.size = size;
+
+                                cfg.divisor_r = shift;
+                                cfg.divisor_e = extra_flags;
+                        }
+
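+                        /* The continuation record carries the computed magic
+                         * multiplier and the original API divisor for the
+                         * NPOT division */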
+                        pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
+                                cfg.divisor_numerator = magic_divisor;
+                                cfg.divisor = divisor;
+                        }
 
-                        k += panfrost_vertex_instanced(ctx->padded_count,
-                                                       instance_shift,
-                                                       instance_odd,
-                                                       divisor, &attrs[k]);
+                        ++k;
                 }
+
+                ++k;
         }
 
         /* Add special gl_VertexID/gl_InstanceID buffers */
 
-        panfrost_vertex_id(ctx->padded_count, &attrs[k]);
-        so->hw[PAN_VERTEX_ID].index = k++;
-        panfrost_instance_id(ctx->padded_count, &attrs[k]);
-        so->hw[PAN_INSTANCE_ID].index = k++;
+        panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
 
-        /* Upload whatever we emitted and go */
+        pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
+                cfg.buffer_index = k++;
+                cfg.format = so->formats[PAN_VERTEX_ID];
+        }
+
+        panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+
+        pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
+                cfg.buffer_index = k++;
+                cfg.format = so->formats[PAN_INSTANCE_ID];
+        }
+
+        /* Attribute addresses require 64-byte alignment, so let:
+         *
+         *      base' = base & ~63 = base - (base & 63)
+         *      offset' = offset + (base & 63)
+         *
+         * Since base' + offset' = base + offset, these are equivalent
+         * addressing modes and now base is 64 aligned.
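+         *
+         * For example, base = 0x10064 gives base' = 0x10040 and
+         * offset' = offset + 0x24, addressing the same element.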
+         */
+
+        unsigned start = vertex_postfix->offset_start;
+
+        for (unsigned i = 0; i < so->num_elements; ++i) {
+                unsigned vbi = so->pipe[i].vertex_buffer_index;
+                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
+
+                /* Adjust by the masked off bits of the offset. Read
+                 * src_offset from the CPU-side vertex state rather than from
+                 * the GPU-visible descriptor, due to caching effects */
+
+                unsigned src_offset = so->pipe[i].src_offset;
+
+                /* BOs are 4k aligned, so they are guaranteed to be 64-byte
+                 * aligned as well */
+                src_offset += (buf->buffer_offset & 63);
+
+                /* Per-instance data also needs to be offset to account for a
+                 * delayed start in an indexed draw */
+
+                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
+                        src_offset -= buf->stride * start;
+
+                pan_pack(out + i, ATTRIBUTE, cfg) {
+                        cfg.buffer_index = attrib_to_buffer[i];
+                        cfg.format = so->formats[i];
+                        cfg.offset = src_offset;
+                }
+        }
 
-        vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
-                                                           k * sizeof(*attrs));
+        vertex_postfix->attributes = S.gpu;
+        vertex_postfix->attribute_meta = T.gpu;
 }
 
 static mali_ptr
@@ -1651,32 +1704,32 @@ pan_varying_present(
 
 /* Emitters for varying records */
 
-static struct mali_attr_meta
-pan_emit_vary(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary(struct mali_attribute_packed *out,
+                unsigned present, enum pan_special_varying buf,
                 unsigned quirks, enum mali_format format,
                 unsigned offset)
 {
         unsigned nr_channels = MALI_EXTRACT_CHANNELS(format);
-
-        struct mali_attr_meta meta = {
-                .index = pan_varying_index(present, buf),
-                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
-                .swizzle = quirks & HAS_SWIZZLES ?
+        unsigned swizzle = quirks & HAS_SWIZZLES ?
                         panfrost_get_default_swizzle(nr_channels) :
-                        panfrost_bifrost_swizzle(nr_channels),
-                .format = format,
-                .src_offset = offset
-        };
+                        panfrost_bifrost_swizzle(nr_channels);
 
-        return meta;
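+        /* The packed format field carries the mali_format in the upper bits,
+         * with the channel swizzle in the low bits */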
+        pan_pack(out, ATTRIBUTE, cfg) {
+                cfg.buffer_index = pan_varying_index(present, buf);
+                cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
+                cfg.format = (format << 12) | swizzle;
+                cfg.offset = offset;
+        }
 }
 
 /* General varying that is unused */
 
-static struct mali_attr_meta
-pan_emit_vary_only(unsigned present, unsigned quirks)
+static void
+pan_emit_vary_only(struct mali_attribute_packed *out,
+                unsigned present, unsigned quirks)
 {
-        return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
+        pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
 }
 
 /* Special records */
@@ -1689,12 +1742,13 @@ static const enum mali_format pan_varying_formats[PAN_VARY_MAX] = {
         [PAN_VARY_FRAGCOORD]    = MALI_RGBA32F
 };
 
-static struct mali_attr_meta
-pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary_special(struct mali_attribute_packed *out,
+                unsigned present, enum pan_special_varying buf,
                 unsigned quirks)
 {
         assert(buf < PAN_VARY_MAX);
-        return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
+        pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
 }
 
 static enum mali_format
@@ -1710,36 +1764,31 @@ pan_xfb_format(enum mali_format format, unsigned nr)
  * a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
  * value. */
 
-static struct mali_attr_meta
-pan_emit_vary_xfb(unsigned present,
+static void
+pan_emit_vary_xfb(struct mali_attribute_packed *out,
+                unsigned present,
                 unsigned max_xfb,
                 unsigned *streamout_offsets,
                 unsigned quirks,
                 enum mali_format format,
                 struct pipe_stream_output o)
 {
-        /* Otherwise construct a record for it */
-        struct mali_attr_meta meta = {
-                /* XFB buffers come after everything else */
-                .index = pan_xfb_base(present) + o.output_buffer,
-
-                /* As usual unknown bit */
-                .unknown1 = quirks & IS_BIFROST ? 0x0 : 0x2,
-
-                /* Override swizzle with number of channels */
-                .swizzle = quirks & HAS_SWIZZLES ?
+        unsigned swizzle = quirks & HAS_SWIZZLES ?
                         panfrost_get_default_swizzle(o.num_components) :
-                        panfrost_bifrost_swizzle(o.num_components),
+                        panfrost_bifrost_swizzle(o.num_components);
+
+        pan_pack(out, ATTRIBUTE, cfg) {
+                /* XFB buffers come after everything else */
+                cfg.buffer_index = pan_xfb_base(present) + o.output_buffer;
+                cfg.unknown = quirks & IS_BIFROST ? 0x0 : 0x1;
 
                 /* Override number of channels and precision to highp */
-                .format = pan_xfb_format(format, o.num_components),
+                cfg.format = (pan_xfb_format(format, o.num_components) << 12) | swizzle;
 
                 /* Apply given offsets together */
-                .src_offset = (o.dst_offset * 4) /* dwords */
-                        + streamout_offsets[o.output_buffer]
-        };
-
-        return meta;
+                cfg.offset = (o.dst_offset * 4) /* dwords */
+                        + streamout_offsets[o.output_buffer];
+        }
 }
 
 /* Determine if we should capture a varying for XFB. This requires actually
@@ -1757,51 +1806,21 @@ panfrost_xfb_captured(struct panfrost_shader_state *xfb,
         return o->output_buffer < max_xfb;
 }
 
-/* Higher-level wrapper around all of the above, classifying a varying into one
- * of the above types */
-
-static struct mali_attr_meta
-panfrost_emit_varying(
-                struct panfrost_shader_state *stage,
+static void
+pan_emit_general_varying(struct mali_attribute_packed *out,
                 struct panfrost_shader_state *other,
                 struct panfrost_shader_state *xfb,
+                gl_varying_slot loc,
+                enum mali_format format,
                 unsigned present,
-                unsigned max_xfb,
-                unsigned *streamout_offsets,
                 unsigned quirks,
                 unsigned *gen_offsets,
                 enum mali_format *gen_formats,
                 unsigned *gen_stride,
                 unsigned idx,
-                bool should_alloc,
-                bool is_fragment)
+                bool should_alloc)
 {
-        gl_varying_slot loc = stage->varyings_loc[idx];
-        enum mali_format format = stage->varyings[idx];
-
-        /* Override format to match linkage */
-        if (!should_alloc && gen_formats[idx])
-                format = gen_formats[idx];
-
-        if (has_point_coord(stage->point_sprite_mask, loc)) {
-                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
-        } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
-                struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
-                return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
-        } else if (loc == VARYING_SLOT_POS) {
-                if (is_fragment)
-                        return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
-                else
-                        return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
-        } else if (loc == VARYING_SLOT_PSIZ) {
-                return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
-        } else if (loc == VARYING_SLOT_PNTC) {
-                return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
-        } else if (loc == VARYING_SLOT_FACE) {
-                return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
-        }
-
-        /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
+        /* Check if we're linked */
         signed other_idx = -1;
 
         for (unsigned j = 0; j < other->varying_count; ++j) {
@@ -1811,8 +1830,10 @@ panfrost_emit_varying(
                 }
         }
 
-        if (other_idx < 0)
-                return pan_emit_vary_only(present, quirks);
+        if (other_idx < 0) {
+                pan_emit_vary_only(out, present, quirks);
+                return;
+        }
 
         unsigned offset = gen_offsets[other_idx];
 
@@ -1844,8 +1865,57 @@ panfrost_emit_varying(
                 *gen_stride += size;
         }
 
-        return pan_emit_vary(present, PAN_VARY_GENERAL,
-                        quirks, format, offset);
+        pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
+}
+
+/* Higher-level wrapper around all of the above, classifying a varying into one
+ * of the above types */
+
+static void
+panfrost_emit_varying(
+                struct mali_attribute_packed *out,
+                struct panfrost_shader_state *stage,
+                struct panfrost_shader_state *other,
+                struct panfrost_shader_state *xfb,
+                unsigned present,
+                unsigned max_xfb,
+                unsigned *streamout_offsets,
+                unsigned quirks,
+                unsigned *gen_offsets,
+                enum mali_format *gen_formats,
+                unsigned *gen_stride,
+                unsigned idx,
+                bool should_alloc,
+                bool is_fragment)
+{
+        gl_varying_slot loc = stage->varyings_loc[idx];
+        enum mali_format format = stage->varyings[idx];
+
+        /* Override format to match linkage */
+        if (!should_alloc && gen_formats[idx])
+                format = gen_formats[idx];
+
+        if (has_point_coord(stage->point_sprite_mask, loc)) {
+                pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+        } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
+                struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+                pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
+        } else if (loc == VARYING_SLOT_POS) {
+                if (is_fragment)
+                        pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
+                else
+                        pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
+        } else if (loc == VARYING_SLOT_PSIZ) {
+                pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
+        } else if (loc == VARYING_SLOT_PNTC) {
+                pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+        } else if (loc == VARYING_SLOT_FACE) {
+                pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
+        } else {
+                pan_emit_general_varying(out, other, xfb, loc, format, present,
+                                quirks, gen_offsets, gen_formats, gen_stride,
+                                idx, should_alloc);
+        }
 }
 
 static void
@@ -1916,18 +1986,18 @@ panfrost_emit_varying_descriptor(struct panfrost_batch *batch,
                                         ctx->streamout.targets[i]);
         }
 
-        struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
-        struct mali_attr_meta *ofs = ovs + vs->varying_count;
+        struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
+        struct mali_attribute_packed *ofs = ovs + vs->varying_count;
 
         for (unsigned i = 0; i < vs->varying_count; i++) {
-                ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
+                panfrost_emit_varying(ovs + i, vs, fs, vs, present,
                                 ctx->streamout.num_targets, streamout_offsets,
                                 dev->quirks,
                                 gen_offsets, gen_formats, &gen_stride, i, true, false);
         }
 
         for (unsigned i = 0; i < fs->varying_count; i++) {
-                ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
+                panfrost_emit_varying(ofs + i, fs, vs, vs, present,
                                 ctx->streamout.num_targets, streamout_offsets,
                                 dev->quirks,
                                 gen_offsets, gen_formats, &gen_stride, i, false, true);