GL_ARB_texture_cube_map_array DONE (i965/gen6+, nv50, softpipe, swr, zink)
GL_ARB_texture_gather DONE (freedreno, i965/gen6+, nv50, softpipe, swr, v3d)
GL_ARB_texture_query_lod DONE (freedreno, i965, nv50, softpipe, swr, v3d, panfrost)
- GL_ARB_transform_feedback2 DONE (i965/gen6+, nv50, softpipe, swr, v3d)
+ GL_ARB_transform_feedback2 DONE (i965/gen6+, nv50, softpipe, swr, v3d, panfrost)
GL_ARB_transform_feedback3 DONE (i965/gen7+, softpipe, swr)
}
static unsigned
-panfrost_streamout_offset(unsigned stride, unsigned offset,
+panfrost_streamout_offset(unsigned stride,
                          struct pipe_stream_output_target *target)
{
-        return (target->buffer_offset + (offset * stride * 4)) & 63;
+        return (target->buffer_offset + (pan_so_target(target)->offset * stride * 4)) & 63;
}
static void
panfrost_emit_streamout(struct panfrost_batch *batch,
                        struct mali_attribute_buffer_packed *slot,
-                        unsigned stride_words, unsigned offset, unsigned count,
+                        unsigned stride_words, unsigned count,
                        struct pipe_stream_output_target *target)
{
        unsigned stride = stride_words * 4;
                              PAN_BO_ACCESS_FRAGMENT);
        /* We will have an offset applied to get alignment */
-        mali_ptr addr = bo->gpu + target->buffer_offset + (offset * stride);
+        mali_ptr addr = bo->gpu + target->buffer_offset + (pan_so_target(target)->offset * stride);
        pan_pack(slot, ATTRIBUTE_BUFFER, cfg) {
                cfg.pointer = (addr & ~63);
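The two hunks above split the raw transform feedback address into a 64-byte-aligned pointer (addr & ~63) written into the attribute buffer descriptor, plus a small residue (& 63) that is re-applied where the buffer is consumed; the in-code comment about "an offset applied to get alignment" refers to exactly this split. A minimal standalone sketch of the arithmetic with made-up values (the real code derives addr from the BO's GPU address, the target's buffer_offset, and the saved vertex offset times the stride):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical values: buffer bound at byte 100, saved vertex
         * offset of 3, 16-byte output stride. */
        uint64_t buffer_offset = 100;
        unsigned so_offset = 3, stride = 16;

        uint64_t addr    = buffer_offset + so_offset * stride;  /* 148 */
        uint64_t pointer = addr & ~63ull;                       /* 128, 64-byte aligned */
        unsigned residue = addr & 63;                           /* 20, re-added downstream */

        assert(pointer + residue == addr);
        return 0;
}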
for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
streamout_offsets[i] = panfrost_streamout_offset(
so->stride[i],
- ctx->streamout.offsets[i],
ctx->streamout.targets[i]);
}
        for (unsigned i = 0; i < ctx->streamout.num_targets; ++i) {
                panfrost_emit_streamout(batch, &varyings[xfb_base + i],
                                        so->stride[i],
-                                        ctx->streamout.offsets[i],
                                        out_count,
                                        ctx->streamout.targets[i]);
        }
                count = u_stream_outputs_for_vertices(ctx->active_prim,
                                                      ctx->vertex_count);
-                ctx->streamout.offsets[i] += count;
+                pan_so_target(ctx->streamout.targets[i])->offset += count;
        }
}
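After each captured draw the saved write position advances by the number of vertices the draw appends to the transform feedback streams; the change simply moves that counter from the per-context array into the target itself. Assuming u_stream_outputs_for_vertices() returns "decomposed primitives times vertices per primitive" for the active primitive type, the bookkeeping works out as in this small sketch (hypothetical helper, triangle strips only):

#include <assert.h>

/* Hypothetical stand-in for u_stream_outputs_for_vertices() restricted to
 * triangle strips: a strip of n vertices decomposes into n - 2 triangles,
 * each written out as three separate vertices. */
static unsigned
xfb_vertices_for_triangle_strip(unsigned n)
{
        unsigned prims = (n >= 3) ? n - 2 : 0;
        return prims * 3;
}

int main(void)
{
        unsigned offset = 0;                            /* plays pan_so_target()->offset */
        offset += xfb_vertices_for_triangle_strip(6);   /* first draw:  +12 */
        offset += xfb_vertices_for_triangle_strip(4);   /* second draw: +6  */
        assert(offset == 18);
        return 0;
}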
                cfg.index_count = info->count;
        } else {
                ctx->offset_start = info->start;
-                cfg.index_count = ctx->vertex_count;
+                cfg.index_count = info->count_from_stream_output ?
+                        pan_so_target(info->count_from_stream_output)->offset :
+                        ctx->vertex_count;
        }
}
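The new ternary serves draws whose vertex count comes from an earlier capture: when gallium sets pipe_draw_info::count_from_stream_output (a glDrawTransformFeedback-style draw), the application never supplies a count, so the driver falls back on the per-target offset it accumulated itself. A rough GL-level illustration of that path (assumes a GL 4.0+ context and a loader such as libepoxy; buffer, program and VAO setup omitted):

#include <epoxy/gl.h>

void draw_then_replay(void)
{
        GLuint xfb;
        glGenTransformFeedbacks(1, &xfb);
        glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, xfb);

        glBeginTransformFeedback(GL_TRIANGLES);
        glDrawArrays(GL_TRIANGLES, 0, 36);   /* captured; saved offset becomes 36 */
        glEndTransformFeedback();

        /* Replay without knowing the count: the frontend passes the bound
         * XFB target as count_from_stream_output and the driver uses
         * pan_so_target(...)->offset as the vertex count. */
        glDrawTransformFeedback(GL_TRIANGLES, xfb);
}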
{
        struct pipe_stream_output_target *target;
-        target = rzalloc(pctx, struct pipe_stream_output_target);
+        target = &rzalloc(pctx, struct panfrost_streamout_target)->base;
        if (!target)
                return NULL;
        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
-                        so->offsets[i] = offsets[i];
+                        pan_so_target(targets[i])->offset = offsets[i];
                pipe_so_target_reference(&so->targets[i], targets[i]);
        }
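For context on the -1 test above: gallium's set_stream_output_targets() passes an offset of -1 (~0u) for a buffer that should keep appending at whatever position it had reached, which is what a resumed or re-bound transform feedback looks like; only an explicit offset resets the saved position. A tiny self-contained sketch of that sentinel handling (hypothetical helper, made-up values):

#include <assert.h>

/* Hypothetical helper mirroring the hunk above: -1 means "append". */
static unsigned
apply_so_offset(unsigned saved, unsigned requested)
{
        return (requested == (unsigned)-1) ? saved : requested;
}

int main(void)
{
        assert(apply_so_offset(96, (unsigned)-1) == 96);  /* resume: keep 96 */
        assert(apply_so_offset(96, 0) == 0);              /* restart at offset 0 */
        return 0;
}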
        bool signaled;
};
+struct panfrost_streamout_target {
+        struct pipe_stream_output_target base;
+        uint32_t offset;
+};
+
struct panfrost_streamout {
        struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
-        uint32_t offsets[PIPE_MAX_SO_BUFFERS];
        unsigned num_targets;
};
        return (struct panfrost_context *) pcontext;
}
+static inline struct panfrost_streamout_target *
+pan_so_target(struct pipe_stream_output_target *target)
+{
+        return (struct panfrost_streamout_target *)target;
+}
+
static inline struct panfrost_shader_state *
panfrost_get_shader_state(struct panfrost_context *ctx,
                          enum pipe_shader_type st)
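The new struct wraps the gallium stream output target in a driver subclass that carries the persistent write offset, and pan_so_target() is the usual downcast helper: because base is the first member, the cast is just a pointer reinterpretation, which is also why the create hook can hand back &...->base directly. A standalone sketch of the pattern with stand-in types (not the real gallium definitions):

#include <assert.h>
#include <stdlib.h>

/* Stand-ins for the real types, for illustration only. */
struct pipe_stream_output_target { unsigned buffer_offset; };
struct panfrost_streamout_target {
        struct pipe_stream_output_target base;  /* must remain the first member */
        unsigned offset;
};

int main(void)
{
        struct panfrost_streamout_target *so = calloc(1, sizeof(*so));
        if (!so)
                return 1;

        struct pipe_stream_output_target *target = &so->base;

        /* The downcast used throughout the patch is a no-op address-wise. */
        assert((void *)target == (void *)so);

        free(so);
        return 0;
}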
        case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
        case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
                return is_bifrost ? 0 : 64;
+        case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
        case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
                return is_bifrost ? 0 : 1;
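Advertising PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME (Midgard only, like the neighbouring transform feedback caps) is what lets the GL frontend expose GL_ARB_transform_feedback2, matching the features.txt hunk at the top; the per-target offset introduced above is the state that has to survive a pause. Roughly the application-visible behaviour being enabled (GL sketch, loader such as libepoxy assumed, setup omitted):

#include <epoxy/gl.h>

void capture_in_two_parts(void)
{
        glBeginTransformFeedback(GL_POINTS);
        glDrawArrays(GL_POINTS, 0, 100);   /* saved offset: 0 -> 100 */

        glPauseTransformFeedback();
        glDrawArrays(GL_POINTS, 0, 50);    /* rendered, but not captured */
        glResumeTransformFeedback();

        glDrawArrays(GL_POINTS, 0, 100);   /* capture resumes at 100 -> 200 */
        glEndTransformFeedback();
}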