bool signaled;
};
+struct panfrost_streamout_target {
+ struct pipe_stream_output_target base;
+ uint32_t offset;
+};
+
struct panfrost_streamout {
struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
- uint32_t offsets[PIPE_MAX_SO_BUFFERS];
unsigned num_targets;
};
/* Gallium context */
struct pipe_context base;
+ /* Upload manager for small resident GPU-internal data structures, like
+ * sampler descriptors. We use an upload manager since the minimum BO
+ * size from the kernel is 4 KiB */
+ struct u_upload_mgr *state_uploader;
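+
+ /* A minimal sketch (an assumption, not necessarily what this patch does)
+  * of how such an uploader is created and fed with the generic Gallium
+  * helpers from util/u_upload_mgr.h; the bind flag, usage, sizes and
+  * out-parameters below are placeholders:
+  *
+  *    ctx->state_uploader =
+  *       u_upload_create(&ctx->base, 4096, PIPE_BIND_CONSTANT_BUFFER,
+  *                       PIPE_USAGE_DYNAMIC, 0);
+  *
+  *    u_upload_alloc(ctx->state_uploader, 0, size, alignment,
+  *                   &offset, &rsrc, &cpu);
+  */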
+
/* Bound job batch and map of panfrost_batch_key to job batches */
struct panfrost_batch *batch;
struct hash_table *batches;
unsigned vertex_count;
unsigned instance_count;
+ unsigned offset_start;
enum pipe_prim_type active_prim;
/* If instancing is enabled, vertex count padded for instance; if
struct panfrost_shader_state {
/* Compiled, mapped descriptor, ready for the hardware */
bool compiled;
+
+ /* Uploaded shader descriptor (TODO: maybe stuff the packed unuploaded
+ * bits in a union to save some memory?) */
+
+ struct {
+ struct pipe_resource *rsrc;
+ uint32_t offset;
+ } upload;
+
struct mali_shader_packed shader;
+ struct mali_midgard_properties_packed properties;
+ struct mali_preload_packed preload;
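+
+ /* A minimal sketch, assuming the packed descriptor is copied into the
+  * context's state_uploader with the generic u_upload_data() helper; ss is
+  * a hypothetical pointer to this struct and the alignment is a
+  * placeholder:
+  *
+  *    u_upload_data(ctx->state_uploader, 0, sizeof(ss->shader), 256,
+  *                  &ss->shader, &ss->upload.offset, &ss->upload.rsrc);
+  */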
/* Non-descript information */
unsigned uniform_count;
unsigned sysval_count;
unsigned sysval[MAX_SYSVAL_COUNT];
- uint16_t point_sprite_mask;
- unsigned point_sprite_upper_left : 1;
-
/* Should we enable helper invocations */
bool helper_invocations;
return (struct panfrost_context *) pcontext;
}
+static inline struct panfrost_streamout_target *
+pan_so_target(struct pipe_stream_output_target *target)
+{
+ return (struct panfrost_streamout_target *)target;
+}
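+
+/* Usage sketch (assumed): the wrapper lets the driver track how far each
+ * stream-output buffer has been written, advancing the offset after a draw,
+ * e.g.
+ *
+ *    struct panfrost_streamout *so = &ctx->streamout;
+ *    for (unsigned i = 0; i < so->num_targets; ++i)
+ *       pan_so_target(so->targets[i])->offset += amount_written;
+ *
+ * where amount_written is a placeholder for whatever the draw emitted. */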
+
static inline struct panfrost_shader_state *
panfrost_get_shader_state(struct panfrost_context *ctx,
enum pipe_shader_type st)