struct ir3 *ir;
struct ir3_shader_variant *so;
+ /* Tables of scalar inputs/outputs. Because of the way varying packing
+ * works, we could have inputs with a fractional location, which is a bit
+ * awkward to deal with unless we keep track of the split scalar
+ * in/out components.
+ *
+ * These *only* have inputs/outputs that are touched by load_*input and
+ * store_output.
+ */
+ unsigned ninputs, noutputs;
+ struct ir3_instruction **inputs;
+ struct ir3_instruction **outputs;
+
struct ir3_block *block; /* the current block */
struct ir3_block *in_block; /* block created for shader inputs */
struct ir3_instruction *frag_face, *frag_coord;
/* For vertex shaders, keep track of the system values sources */
- struct ir3_instruction *vertex_id, *basevertex, *instance_id;
+ struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance;
/* For fragment shaders: */
struct ir3_instruction *samp_id, *samp_mask_in;
+ /* For geometry shaders: */
+ struct ir3_instruction *primitive_id;
+ struct ir3_instruction *gs_header;
+
+ /* For tessellation shaders: */
+ struct ir3_instruction *patch_vertices_in;
+ struct ir3_instruction *tcs_header;
+ struct ir3_instruction *tess_coord;
+
/* Compute shader inputs: */
struct ir3_instruction *local_invocation_id, *work_group_id;
* src used for an array of vec1 cannot be also used for an
* array of vec4.
*/
- struct hash_table *addr_ht[4];
+ struct hash_table *addr0_ht[4];
+
+ /* Like addr0_ht, but for a1.x. Only immediate values are supported,
+ * as that is the only use so far.
+ */
+ struct hash_table_u64 *addr1_ht;
/* last dst array, for indirect we need to insert a var-store.
*/
struct ir3_instruction **dst);
void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
+ void (*emit_intrinsic_load_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+ struct ir3_instruction **dst);
void (*emit_intrinsic_store_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
};
if (!(cond)) ir3_context_error((ctx), "failed assert: "#cond"\n"); \
} while (0)
-struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
+struct ir3_instruction * ir3_get_addr0(struct ir3_context *ctx,
struct ir3_instruction *src, int align);
+struct ir3_instruction * ir3_get_addr1(struct ir3_context *ctx,
+ unsigned const_val);
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
struct ir3_instruction *src);
void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
- struct ir3_array *arr, int n, struct ir3_instruction *address);
+ struct ir3_array *arr, int n, struct ir3_instruction *address,
+ unsigned bitsize);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
struct ir3_instruction *src, struct ir3_instruction *address);