struct panfrost_context *ctx = batch->ctx;
struct panfrost_vertex_state *so = ctx->vertex;
- /* Staged mali_attr, and index into them. i =/= k, depending on the
- * vertex buffer mask and instancing. Twice as much room is allocated,
- * for a worst case of NPOT_DIVIDEs which take up extra slot */
- union mali_attr attrs[PIPE_MAX_ATTRIBS * 2];
+ unsigned instance_shift = vertex_postfix->instance_shift;
+ unsigned instance_odd = vertex_postfix->instance_odd;
+
+ /* Worst case: everything is NPOT, so every attribute needs a
+ * continuation record */
+
+ struct panfrost_transfer S = panfrost_pool_alloc(&batch->pool,
+ MALI_ATTRIBUTE_LENGTH * PIPE_MAX_ATTRIBS * 2);
+
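+ /* One descriptor per attribute slot: PAN_INSTANCE_ID is the last
+ * special slot, so this covers the user attributes plus the
+ * gl_VertexID/gl_InstanceID builtins */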
+ struct panfrost_transfer T = panfrost_pool_alloc(&batch->pool,
+ MALI_ATTRIBUTE_LENGTH * (PAN_INSTANCE_ID + 1));
+
+ struct mali_attribute_buffer_packed *bufs =
+ (struct mali_attribute_buffer_packed *) S.cpu;
+
+ struct mali_attribute_packed *out =
+ (struct mali_attribute_packed *) T.cpu;
+
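+ /* Attribute index i and buffer index k diverge once NPOT divisors
+ * insert continuation records, so keep a map for the offset fixup
+ * pass below */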
+ unsigned attrib_to_buffer[PIPE_MAX_ATTRIBS] = { 0 };
unsigned k = 0;
for (unsigned i = 0; i < so->num_elements; ++i) {
- /* We map a mali_attr to be 1:1 with the mali_attr_meta, which
+ /* We map buffers 1:1 with the attributes, which
 * means duplicating some vertex buffers (harmless beyond
 * a possible minor caching cost) */
struct pipe_vertex_element *elem = &so->pipe[i];
unsigned vbi = elem->vertex_buffer_index;
-
- /* The exception to 1:1 mapping is that we can have multiple
- * entries (NPOT divisors), so we fixup anyways */
-
- so->hw[i].index = k;
+ attrib_to_buffer[i] = k;
if (!(ctx->vb_mask & (1 << vbi)))
continue;
 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
 struct panfrost_resource *rsrc = pan_resource(buf->buffer.resource);
if (!rsrc)
continue;
- /* Align to 64 bytes by masking off the lower bits. This
- * will be adjusted back when we fixup the src_offset in
- * mali_attr_meta */
-
- mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
- mali_ptr addr = raw_addr & ~63;
- unsigned chopped_addr = raw_addr - addr;
-
/* Add a dependency of the batch on the vertex buffer */
panfrost_batch_add_bo(batch, rsrc->bo,
PAN_BO_ACCESS_SHARED |
PAN_BO_ACCESS_READ |
PAN_BO_ACCESS_VERTEX_TILER);
- /* Set common fields */
- attrs[k].elements = addr;
- attrs[k].stride = buf->stride;
+ /* Mask off the low six bits for 64-byte alignment; see the
+ * offset fixup below */
+ mali_ptr raw_addr = rsrc->bo->gpu + buf->buffer_offset;
+ mali_ptr addr = raw_addr & ~63;
/* Since we advanced the base pointer, we shrink the buffer
- * size */
- attrs[k].size = rsrc->base.width0 - buf->buffer_offset;
+ * size, but add the offset we subtracted */
+ unsigned size = rsrc->base.width0 + (raw_addr - addr)
+ - buf->buffer_offset;
- /* We need to add the extra size we masked off (for
- * correctness) so the data doesn't get clamped away */
- attrs[k].size += chopped_addr;
+ /* When there is a divisor, the hardware-level divisor is
+ * the product of the instance divisor and the padded count */
+ unsigned divisor = elem->instance_divisor;
+ unsigned hw_divisor = ctx->padded_count * divisor;
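+ /* Attributes are fetched by linear index
+ * (instance * padded_count + vertex), so e.g. padded_count = 8
+ * with divisor = 3 gives hw_divisor = 24, stepping the attribute
+ * once every three instances as desired */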
+ unsigned stride = buf->stride;
- /* For non-instancing make sure we initialize */
- attrs[k].shift = attrs[k].extra_flags = 0;
+ /* If there's a divisor but no actual instancing, every
+ * instance must read the same attribute, so zero the stride
+ * to stay put */
- /* Instancing uses a dramatically different code path than
- * linear, so dispatch for the actual emission now that the
- * common code is finished */
+ if (divisor && ctx->instance_count == 1)
+ stride = 0;
- unsigned divisor = elem->instance_divisor;
+ if (!divisor || ctx->instance_count <= 1) {
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ if (ctx->instance_count > 1)
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_MODULUS;
+
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
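+ /* divisor_r/divisor_p encode the padded vertex count
+ * (an odd factor times a power of two), used when
+ * instancing to wrap the linear index per instance */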
+ cfg.divisor_r = instance_shift;
+ cfg.divisor_p = instance_odd;
+ }
+ } else if (util_is_power_of_two_or_zero(hw_divisor)) {
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_POT_DIVISOR;
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
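+ /* Dividing by a power of two is just a right shift,
+ * so only log2(hw_divisor) needs to be recorded */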
+ cfg.divisor_r = __builtin_ctz(hw_divisor);
+ }
- if (divisor && ctx->instance_count == 1) {
- /* Silly corner case where there's a divisor(=1) but
- * there's no legitimate instancing. So we want *every*
- * attribute to be the same. So set stride to zero so
- * we don't go anywhere. */
-
- attrs[k].size = attrs[k].stride + chopped_addr;
- attrs[k].stride = 0;
- attrs[k++].elements |= MALI_ATTR_LINEAR;
- } else if (ctx->instance_count <= 1) {
- /* Normal, non-instanced attributes */
- attrs[k++].elements |= MALI_ATTR_LINEAR;
} else {
- unsigned instance_shift = vertex_postfix->instance_shift;
- unsigned instance_odd = vertex_postfix->instance_odd;
+ unsigned shift = 0, extra_flags = 0;
+
+ unsigned magic_divisor =
+ panfrost_compute_magic_divisor(hw_divisor, &shift, &extra_flags);
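+ /* Arbitrary divisors use the usual division-by-invariant-
+ * integers trick: multiply by a precomputed "magic"
+ * reciprocal, then shift; the numerator lives in the
+ * continuation record below */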
- k += panfrost_vertex_instanced(ctx->padded_count,
- instance_shift,
- instance_odd,
- divisor, &attrs[k]);
+ pan_pack(bufs + k, ATTRIBUTE_BUFFER, cfg) {
+ cfg.type = MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR;
+ cfg.pointer = addr;
+ cfg.stride = stride;
+ cfg.size = size;
+
+ cfg.divisor_r = shift;
+ cfg.divisor_e = extra_flags;
+ }
+
+ pan_pack(bufs + k + 1, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, cfg) {
+ cfg.divisor_numerator = magic_divisor;
+ cfg.divisor = divisor;
+ }
+
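+ /* NPOT divisors burn a second buffer slot for the
+ * continuation record, hence the extra increment */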
+ ++k;
}
+
+ ++k;
}
/* Add special gl_VertexID/gl_InstanceID buffers */
- panfrost_vertex_id(ctx->padded_count, &attrs[k]);
- so->hw[PAN_VERTEX_ID].index = k++;
- panfrost_instance_id(ctx->padded_count, &attrs[k]);
- so->hw[PAN_INSTANCE_ID].index = k++;
+ panfrost_vertex_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+
+ pan_pack(out + PAN_VERTEX_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_VERTEX_ID];
+ }
+
+ panfrost_instance_id(ctx->padded_count, &bufs[k], ctx->instance_count > 1);
+
+ pan_pack(out + PAN_INSTANCE_ID, ATTRIBUTE, cfg) {
+ cfg.buffer_index = k++;
+ cfg.format = so->formats[PAN_INSTANCE_ID];
+ }
 /* Attribute addresses require 64-byte alignment, so let:
 *
 *      base' = base & ~63 = base - (base & 63)
 *      offset' = offset + (base & 63)
 *
 * Since base' + offset' = base + offset, these are equivalent
 * addressing modes and now base is 64-byte aligned */
if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start)
src_offset -= buf->stride * start;
- so->hw[i].src_offset = src_offset;
+ pan_pack(out + i, ATTRIBUTE, cfg) {
+ cfg.buffer_index = attrib_to_buffer[i];
+ cfg.format = so->formats[i];
+ cfg.offset = src_offset;
+ }
}
-
- vertex_postfix->attributes = panfrost_pool_upload(&batch->pool, attrs,
- k * sizeof(*attrs));
-
- vertex_postfix->attribute_meta = panfrost_pool_upload(&batch->pool, so->hw,
- sizeof(*so->hw) *
- PAN_MAX_ATTRIBUTE);
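+ /* The descriptors were packed in place, so no trailing upload is
+ * needed; just point the postfix at the pool allocations */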
+ vertex_postfix->attributes = S.gpu;
+ vertex_postfix->attribute_meta = T.gpu;
}
static mali_ptr
/* Emitters for varying records */
-static struct mali_attr_meta
-pan_emit_vary(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary(struct mali_attribute_packed *out,
+ unsigned present, enum pan_special_varying buf,
unsigned quirks, enum mali_format format,
unsigned offset)
{
.src_offset = offset
};
- return meta;
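+ /* The memcpy relies on mali_attr_meta sharing the packed
+ * descriptor's layout; it can go once varyings are packed directly */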
+ memcpy(out, &meta, sizeof(meta));
}
/* General varying that is unused */
-static struct mali_attr_meta
-pan_emit_vary_only(unsigned present, unsigned quirks)
+static void
+pan_emit_vary_only(struct mali_attribute_packed *out,
+ unsigned present, unsigned quirks)
{
- return pan_emit_vary(present, 0, quirks, MALI_VARYING_DISCARD, 0);
+ pan_emit_vary(out, present, 0, quirks, MALI_VARYING_DISCARD, 0);
}
/* Special records */
[PAN_VARY_FRAGCOORD] = MALI_RGBA32F
};
-static struct mali_attr_meta
-pan_emit_vary_special(unsigned present, enum pan_special_varying buf,
+static void
+pan_emit_vary_special(struct mali_attribute_packed *out,
+ unsigned present, enum pan_special_varying buf,
unsigned quirks)
{
assert(buf < PAN_VARY_MAX);
- return pan_emit_vary(present, buf, quirks, pan_varying_formats[buf], 0);
+ pan_emit_vary(out, present, buf, quirks, pan_varying_formats[buf], 0);
}
static enum mali_format
* a bitfield) 32-bit, smaller than a 64-bit pointer, so may as well pass by
* value. */
-static struct mali_attr_meta
-pan_emit_vary_xfb(unsigned present,
+static void
+pan_emit_vary_xfb(struct mali_attribute_packed *out,
+ unsigned present,
unsigned max_xfb,
unsigned *streamout_offsets,
unsigned quirks,
+ streamout_offsets[o.output_buffer]
};
- return meta;
+ memcpy(out, &meta, sizeof(meta));
}
/* Determine if we should capture a varying for XFB. This requires actually
return o->output_buffer < max_xfb;
}
-/* Higher-level wrapper around all of the above, classifying a varying into one
- * of the above types */
-
-static struct mali_attr_meta
-panfrost_emit_varying(
- struct panfrost_shader_state *stage,
+static void
+pan_emit_general_varying(struct mali_attribute_packed *out,
struct panfrost_shader_state *other,
struct panfrost_shader_state *xfb,
+ gl_varying_slot loc,
+ enum mali_format format,
unsigned present,
- unsigned max_xfb,
- unsigned *streamout_offsets,
unsigned quirks,
unsigned *gen_offsets,
enum mali_format *gen_formats,
unsigned *gen_stride,
unsigned idx,
- bool should_alloc,
- bool is_fragment)
+ bool should_alloc)
{
- gl_varying_slot loc = stage->varyings_loc[idx];
- enum mali_format format = stage->varyings[idx];
-
- /* Override format to match linkage */
- if (!should_alloc && gen_formats[idx])
- format = gen_formats[idx];
-
- if (has_point_coord(stage->point_sprite_mask, loc)) {
- return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
- } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
- struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
- return pan_emit_vary_xfb(present, max_xfb, streamout_offsets, quirks, format, *o);
- } else if (loc == VARYING_SLOT_POS) {
- if (is_fragment)
- return pan_emit_vary_special(present, PAN_VARY_FRAGCOORD, quirks);
- else
- return pan_emit_vary_special(present, PAN_VARY_POSITION, quirks);
- } else if (loc == VARYING_SLOT_PSIZ) {
- return pan_emit_vary_special(present, PAN_VARY_PSIZ, quirks);
- } else if (loc == VARYING_SLOT_PNTC) {
- return pan_emit_vary_special(present, PAN_VARY_PNTCOORD, quirks);
- } else if (loc == VARYING_SLOT_FACE) {
- return pan_emit_vary_special(present, PAN_VARY_FACE, quirks);
- }
-
- /* We've exhausted special cases, so it's otherwise a general varying. Check if we're linked */
+ /* General varying: check whether the slot is linked with the
+ * other shader stage */
signed other_idx = -1;
for (unsigned j = 0; j < other->varying_count; ++j) {
}
}
- if (other_idx < 0)
- return pan_emit_vary_only(present, quirks);
+ if (other_idx < 0) {
+ pan_emit_vary_only(out, present, quirks);
+ return;
+ }
unsigned offset = gen_offsets[other_idx];
*gen_stride += size;
}
- return pan_emit_vary(present, PAN_VARY_GENERAL,
- quirks, format, offset);
+ pan_emit_vary(out, present, PAN_VARY_GENERAL, quirks, format, offset);
+}
+
+/* Higher-level wrapper around all of the above, classifying a varying into one
+ * of the above types */
+
+static void
+panfrost_emit_varying(
+ struct mali_attribute_packed *out,
+ struct panfrost_shader_state *stage,
+ struct panfrost_shader_state *other,
+ struct panfrost_shader_state *xfb,
+ unsigned present,
+ unsigned max_xfb,
+ unsigned *streamout_offsets,
+ unsigned quirks,
+ unsigned *gen_offsets,
+ enum mali_format *gen_formats,
+ unsigned *gen_stride,
+ unsigned idx,
+ bool should_alloc,
+ bool is_fragment)
+{
+ gl_varying_slot loc = stage->varyings_loc[idx];
+ enum mali_format format = stage->varyings[idx];
+
+ /* Override format to match linkage */
+ if (!should_alloc && gen_formats[idx])
+ format = gen_formats[idx];
+
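+ /* Point sprite replacement and XFB capture take priority over
+ * the fixed special slots */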
+ if (has_point_coord(stage->point_sprite_mask, loc)) {
+ pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+ } else if (panfrost_xfb_captured(xfb, loc, max_xfb)) {
+ struct pipe_stream_output *o = pan_get_so(&xfb->stream_output, loc);
+ pan_emit_vary_xfb(out, present, max_xfb, streamout_offsets, quirks, format, *o);
+ } else if (loc == VARYING_SLOT_POS) {
+ if (is_fragment)
+ pan_emit_vary_special(out, present, PAN_VARY_FRAGCOORD, quirks);
+ else
+ pan_emit_vary_special(out, present, PAN_VARY_POSITION, quirks);
+ } else if (loc == VARYING_SLOT_PSIZ) {
+ pan_emit_vary_special(out, present, PAN_VARY_PSIZ, quirks);
+ } else if (loc == VARYING_SLOT_PNTC) {
+ pan_emit_vary_special(out, present, PAN_VARY_PNTCOORD, quirks);
+ } else if (loc == VARYING_SLOT_FACE) {
+ pan_emit_vary_special(out, present, PAN_VARY_FACE, quirks);
+ } else {
+ pan_emit_general_varying(out, other, xfb, loc, format, present,
+ quirks, gen_offsets, gen_formats, gen_stride,
+ idx, should_alloc);
+ }
}
static void
ctx->streamout.targets[i]);
}
- struct mali_attr_meta *ovs = (struct mali_attr_meta *)trans.cpu;
- struct mali_attr_meta *ofs = ovs + vs->varying_count;
+ struct mali_attribute_packed *ovs = (struct mali_attribute_packed *)trans.cpu;
+ struct mali_attribute_packed *ofs = ovs + vs->varying_count;
for (unsigned i = 0; i < vs->varying_count; i++) {
- ovs[i] = panfrost_emit_varying(vs, fs, vs, present,
+ panfrost_emit_varying(ovs + i, vs, fs, vs, present,
ctx->streamout.num_targets, streamout_offsets,
dev->quirks,
gen_offsets, gen_formats, &gen_stride, i, true, false);
}
for (unsigned i = 0; i < fs->varying_count; i++) {
- ofs[i] = panfrost_emit_varying(fs, vs, vs, present,
+ panfrost_emit_varying(ofs + i, fs, vs, vs, present,
ctx->streamout.num_targets, streamout_offsets,
dev->quirks,
gen_offsets, gen_formats, &gen_stride, i, false, true);