* registers. Negative values mean unused. */
typedef struct {
- int src0;
- int src1;
+ int src[3];
int dest;
- /* src1 is -not- SSA but instead a 16-bit inline constant to be smudged
- * in. Only valid for ALU ops. */
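+ /* If set, src[1] is not an SSA index but a 16-bit inline constant to be
+ * merged in at emit time. Only valid for ALU ops. */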
bool inline_constant;
} ssa_args;
bool writeout;
bool prepacked_branch;
+ /* Kind of a hack, but hint against aggressive DCE */
+ bool dont_eliminate;
+
/* Masks in a saneish format. One bit per channel, not packed fancy.
* Use this instead of the op specific ones, and switch over at emit
* time */
+
uint16_t mask;
+ /* For ALU ops only: set to true to invert (bitwise NOT) the
+ * destination of an integer-out op. Not implemented in hardware but
+ * allows more optimizations */
+
+ bool invert;
+
+ /* Hint for the register allocator not to spill the destination written
+ * by this instruction (because it is a spill/unspill node itself) */
+
+ bool no_spill;
+
+ /* Generic hint for intra-pass use */
+ bool hint;
+
union {
midgard_load_store_word load_store;
midgard_vector_alu alu;
/* List of midgard_instructions emitted for the current block */
struct list_head instructions;
+ /* Index of the block in source order */
+ unsigned source_id;
+
bool is_scheduled;
/* List of midgard_bundles emitted (after the scheduler has run) */
/* Number of quadwords _actually_ emitted, as determined after scheduling */
unsigned quadword_count;
- /* Successors: always one forward (the block after us), maybe
- * one backwards (for a backward branch). No need for a second
- * forward, since graph traversal would get there eventually
- * anyway */
+ /* Succeeding blocks. The compiler should not rely on
+ * source-order traversal */
struct midgard_block *successors[2];
unsigned nr_successors;
+ struct set *predecessors;
+
/* The successors pointers form a graph, and in the case of
* complex control flow, this graph can have cycles. To aid
* traversal during liveness analysis, we have a visited?
nir_shader *nir;
gl_shader_stage stage;
+ /* The screen we correspond to */
+ struct midgard_screen *screen;
+
/* Is internally a blend shader? Depends on stage == FRAGMENT */
bool is_blend;
/* Tracking for blend constant patching */
int blend_constant_offset;
+ /* Number of bytes used for Thread Local Storage */
+ unsigned tls_size;
+
+ /* Count of spills and fills for shaderdb */
+ unsigned spills;
+ unsigned fills;
+
/* Current NIR function */
nir_function *func;
+ /* Counter of allocated compiler temporaries */
+ unsigned temp_alloc;
+
/* Unordered list of midgard_blocks */
int block_count;
struct list_head blocks;
- midgard_block *initial_block;
- midgard_block *previous_source_block;
- midgard_block *final_block;
+ /* TODO merge with block_count? */
+ unsigned block_source_count;
/* List of midgard_instructions emitted for the current block */
midgard_block *current_block;
+ /* If there is a preset after-block, use it; otherwise emit_block will create one if this is NULL */
+ midgard_block *after_block;
+
/* The current "depth" of the loop, for disambiguating breaks/continues
* when using nested loops */
int current_loop_depth;
/* Constants which have been loaded, for later inlining */
struct hash_table_u64 *ssa_constants;
- /* SSA values / registers which have been aliased. Naively, these
- * demand a fmov output; instead, we alias them in a later pass to
- * avoid the wasted op.
- *
- * A note on encoding: to avoid dynamic memory management here, rather
- * than ampping to a pointer, we map to the source index; the key
- * itself is just the destination index. */
-
- struct hash_table_u64 *ssa_to_alias;
- struct set *leftover_ssa_to_alias;
-
- /* Actual SSA-to-register for RA */
- struct hash_table_u64 *ssa_to_register;
-
/* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
struct hash_table_u64 *hash_to_temp;
int temp_count;
/* Mapping of texture register -> SSA index for unaliasing */
int texture_index[2];
- /* If any path hits a discard instruction */
- bool can_discard;
-
/* The number of uniforms allowable for the fast path */
int uniform_cutoff;
/* Alpha ref value passed in */
float alpha_ref;
- /* The index corresponding to the fragment output */
- unsigned fragment_output;
-
/* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
unsigned sysvals[MAX_SYSVAL_COUNT];
unsigned sysval_count;
return heap;
}
-static inline void
+static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
- list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions);
+ midgard_instruction *u = mir_upload_ins(ins);
+ list_addtail(&u->link, &ctx->current_block->instructions);
+ return u;
}
-static inline void
+static inline struct midgard_instruction *
mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
{
- list_addtail(&(mir_upload_ins(ins))->link, &tag->link);
+ struct midgard_instruction *u = mir_upload_ins(ins);
+ list_addtail(&u->link, &tag->link);
+ return u;
}
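+
+/* Example (illustrative): both emitters return a pointer to the uploaded
+ * heap copy, so callers can patch the instruction after emission:
+ *
+ *    midgard_instruction *u = emit_mir_instruction(ctx, ins);
+ *    u->mask = 0x1;
+ */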
static inline void
#define mir_foreach_instr_in_block(block, v) \
list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)
+#define mir_foreach_instr_in_block_rev(block, v) \
+ list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_safe(block, v) \
list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
mir_foreach_block(ctx, v_block) \
mir_foreach_instr_in_block_safe(v_block, v)
+#define mir_foreach_successor(blk, v) \
+ struct midgard_block *v; \
+ struct midgard_block **_v; \
+ for (_v = &blk->successors[0]; \
+ _v < &blk->successors[2] && (v = *_v) != NULL; \
+ _v++)
+
+/* Based on set_foreach, expanded with automatic type casts */
+
+#define mir_foreach_predecessor(blk, v) \
+ struct set_entry *_entry_##v; \
+ struct midgard_block *v; \
+ for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
+ v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
+ _entry_##v != NULL; \
+ _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
+ v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
+
+#define mir_foreach_src(ins, v) \
+ for (unsigned v = 0; v < ARRAY_SIZE(ins->ssa_args.src); ++v)
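+
+/* Usage sketch (illustrative, with hypothetical indices old and new):
+ *
+ *    mir_foreach_src(ins, i) {
+ *            if (ins->ssa_args.src[i] == old)
+ *                    ins->ssa_args.src[i] = new;
+ *    }
+ */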
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
return (struct midgard_block *) lst;
}
+static inline midgard_block *
+mir_exit_block(struct compiler_context *ctx)
+{
+ midgard_block *last = list_last_entry(&ctx->blocks,
+ struct midgard_block, link);
+
+ /* The last block must be empty (the exit block) */
+ assert(list_empty(&last->instructions));
+ assert(last->nr_successors == 0);
+
+ return last;
+}
+
static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
return IS_ALU(bundle->tag);
}
+/* Registers and SSA values are distinguished in the backend by the bottom bit */
+
+#define IS_REG (1)
+
+static inline unsigned
+make_compiler_temp(compiler_context *ctx)
+{
+ return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
+}
+
+static inline unsigned
+make_compiler_temp_reg(compiler_context *ctx)
+{
+ return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
+}
+
+static inline unsigned
+nir_src_index(compiler_context *ctx, nir_src *src)
+{
+ if (src->is_ssa)
+ return (src->ssa->index << 1) | 0;
+ else {
+ assert(!src->reg.indirect);
+ return (src->reg.reg->index << 1) | IS_REG;
+ }
+}
+
+static inline unsigned
+nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
+{
+ return nir_src_index(ctx, &src->src);
+}
+
+static inline unsigned
+nir_dest_index(compiler_context *ctx, nir_dest *dst)
+{
+ if (dst->is_ssa)
+ return (dst->ssa.index << 1) | 0;
+ else {
+ assert(!dst->reg.indirect);
+ return (dst->reg.reg->index << 1) | IS_REG;
+ }
+}
+
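+/* Encoding sketch (illustrative): even indices denote SSA values and odd
+ * indices NIR registers, so SSA value 5 maps to (5 << 1) = 10 while
+ * register 5 maps to (5 << 1) | IS_REG = 11. Compiler temporaries are
+ * allocated past the NIR ssa_alloc/reg_alloc ranges, so they cannot
+ * collide with NIR-derived indices. */
+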
/* MIR manipulation */
void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
+void mir_rewrite_index_dst_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
+void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
+void mir_rewrite_index_src_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
+void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned swizzle);
+bool mir_single_use(compiler_context *ctx, unsigned value);
+bool mir_special_index(compiler_context *ctx, unsigned idx);
+unsigned mir_use_count(compiler_context *ctx, unsigned value);
+bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
+unsigned mir_mask_of_read_components(midgard_instruction *ins, unsigned node);
+unsigned mir_ubo_shift(midgard_load_store_op op);
/* MIR printing */
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
+bool mir_nontrivial_source2_mod(midgard_instruction *ins);
+bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
+bool mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask);
+bool mir_nontrivial_outmod(midgard_instruction *ins);
/* MIR goodies */
.type = TAG_ALU_4,
.mask = 0xF,
.ssa_args = {
- .src0 = SSA_UNUSED_1,
- .src1 = src,
+ .src = { SSA_UNUSED_1, src, -1 },
.dest = dest,
},
.alu = {
return ins;
}
+static inline bool
+mir_has_arg(midgard_instruction *ins, unsigned arg)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
+ if (ins->ssa_args.src[i] == arg)
+ return true;
+ }
+
+ return false;
+}
+
/* Scheduling */
void schedule_program(compiler_context *ctx);
struct ra_graph;
+/* Broad types of register classes so we can handle special
+ * registers */
+
+#define NR_REG_CLASSES 5
+
+#define REG_CLASS_WORK 0
+#define REG_CLASS_LDST 1
+#define REG_CLASS_LDST27 2
+#define REG_CLASS_TEXR 3
+#define REG_CLASS_TEXW 4
+
+void mir_lower_special_reads(compiler_context *ctx);
struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
void install_registers(compiler_context *ctx, struct ra_graph *g);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
void mir_create_pipeline_registers(compiler_context *ctx);
void
-midgard_promote_uniforms(compiler_context *ctx, unsigned pressure);
+midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count);
-void
+midgard_instruction *
emit_ubo_read(
compiler_context *ctx,
+ nir_instr *instr,
unsigned dest,
unsigned offset,
nir_src *indirect_offset,
unsigned index);
+void
+emit_sysval_read(compiler_context *ctx, nir_instr *instr, signed dest_override, unsigned nr_components);
+
+void
+midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);
+
+void
+midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);
+
+bool mir_op_computes_derivatives(unsigned op);
/* Final emission */
void
nir_clamp_psiz(nir_shader *shader, float min_size, float max_size);
+/* Optimizations */
+
+bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
+void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
+
+void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
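+
+/* Typical pass loop (illustrative): each peephole pass returns true on
+ * progress, so callers iterate to a fixpoint:
+ *
+ *    bool progress;
+ *    do {
+ *            progress = false;
+ *            mir_foreach_block(ctx, block)
+ *                    progress |= midgard_opt_copy_prop(ctx, block);
+ *    } while (progress);
+ */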
+
#endif