/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard_compile.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3

typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */

        /* The actual target */
} midgard_branch;

/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
 * though this is not represented in this structure. Its format bridges
 * the low-level binary representation with the higher level semantic meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass. */

typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* Instruction arguments represented as block-local SSA
         * indices, rather than registers. ~0 means unused. */

        /* Swizzle for the conditional for a csel/branch */
        unsigned cond_swizzle;

        /* Special fields for an ALU instruction */
        midgard_reg_info registers;

        /* I.e. (1 << alu_bit) */

        uint32_t constants[4];
        uint16_t inline_constant;
        bool has_blend_constant;
        bool has_inline_constant;

        bool prepacked_branch;

        /* Kind of a hack, but hint against aggressive DCE */

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time. See the small mask sketch after this struct. */

        /* For ALU ops only: set to true to invert (bitwise NOT) the
         * destination of an integer-out op. Not implemented in hardware but
         * allows more optimizations */

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself) */

        /* Generic hint for intra-pass use */

        /* During scheduling, the backwards dependency graph
         * (DAG). nr_dependencies is the number of unscheduled
         * instructions that must still be scheduled after
         * (before) this instruction. dependents are which
         * instructions need to be scheduled before (after) this
         * instruction. */

        unsigned nr_dependencies;
        BITSET_WORD *dependents;

        midgard_load_store_word load_store;
        midgard_vector_alu alu;
        midgard_texture_word texture;
        midgard_branch_extended branch_extended;

        /* General branch, rather than packed br_compact. Higher level
         * than the other components */
        midgard_branch branch;
} midgard_instruction;

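/* Small illustrative sketch (hypothetical helper, not part of the driver
 * API): with the per-channel mask convention described above, writing .xy
 * corresponds to mask 0x3 and a full vec4 write to 0xF. A helper turning a
 * component count into such a mask could look like: */

static inline unsigned
mir_example_mask_from_components(unsigned nr_components)
{
        /* One bit per channel: vec1 -> 0x1, vec2 -> 0x3, vec4 -> 0xF */
        return (1u << nr_components) - 1;
}
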
typedef struct midgard_block {
        /* Link to next block. Must be first for mir_get_block */
        struct list_head link;

        /* List of midgard_instructions emitted for the current block */
        struct list_head instructions;

        /* Index of the block in source order */

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Succeeding blocks. The compiler should not necessarily rely on
         * source-order traversal */
        struct midgard_block *successors[2];
        unsigned nr_successors;

        struct set *predecessors;

        /* The successor pointers form a graph, and in the case of
         * complex control flow, this graph has cycles. To aid
         * traversal during liveness analysis, we have a visited?
         * boolean for passes to use as they see fit, provided they
         * clear it after use */

        /* In liveness analysis, these are live masks (per-component) for
         * indices for the block. Scalar compilers have the luxury of using
         * simple bit fields, but for us, liveness is a vector idea. We use
         * 8-bit to allow fine-grained tracking up to vec8. If you're
         * implementing vec16 on Panfrost... I'm sorry. */
} midgard_block;

typedef struct midgard_bundle {
        /* Tag for the overall bundle */

        /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
         * sadd, vadd, smul, vlut, branch) */
        int instruction_count;
        midgard_instruction *instructions[6];

        /* Bundle-wide ALU configuration */

        bool has_embedded_constants;
        bool has_blend_constant;
} midgard_bundle;

typedef struct compiler_context {
        gl_shader_stage stage;

        /* The screen we correspond to */
        struct midgard_screen *screen;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */

        /* Count of spills and fills for shaderdb */

        /* Current NIR function */

        /* Allocated compiler temporary counter */

        /* Unordered list of midgard_blocks */
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* If there is a preset after block, use this, otherwise emit_block will create one if NULL */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */

        /* Used for cont/last hinting. Increase when a tex op is added.
         * Decrease when a tex op is removed. */
        int texture_op_count;

        /* The number of uniforms allowable for the fast path */

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */

        unsigned quadword_count;

        /* The mapping of sysvals to uniforms, the count, and the off-by-one
         * inverse (see the lookup sketch after this struct) */
        unsigned sysvals[MAX_SYSVAL_COUNT];
        unsigned sysval_count;
        struct hash_table_u64 *sysval_to_id;
} compiler_context;

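/* Illustrative sketch, assuming the off-by-one convention above means
 * sysval_to_id stores (uniform id + 1), so a missing entry reads back as
 * zero. A hypothetical lookup helper could then be: */

static inline signed
mir_example_sysval_to_uniform(compiler_context *ctx, unsigned sysval)
{
        void *entry = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);

        /* NULL (i.e. 0) means "not present"; anything else is id + 1 */
        return entry ? ((signed) (uintptr_t) entry) - 1 : -1;
}
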
/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &ctx->current_block->instructions);
        return u;
}

static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}

static inline midgard_instruction *
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction *
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}

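/* Illustrative sketch (hypothetical helper, not part of the driver API): a
 * pass can swap an existing instruction for a new one by combining the list
 * helpers above. `ins` is assumed to already be linked into a block. */

static inline struct midgard_instruction *
mir_example_replace_instruction(struct compiler_context *ctx,
                                struct midgard_instruction *ins,
                                struct midgard_instruction replacement)
{
        /* Link the replacement immediately before the old instruction... */
        struct midgard_instruction *u =
                mir_insert_instruction_before(ctx, ins, replacement);

        /* ...then unlink the old one from its block */
        mir_remove_instruction(ins);

        return u;
}
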
#define mir_foreach_block(ctx, v) \
        list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)

#define mir_foreach_instr(ctx, v) \
        list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_safe(ctx, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
        util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
        midgard_instruction* v; \
        signed i = 0; \
        mir_foreach_bundle_in_block_rev(block, _bundle) \
                for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
                        i >= 0; \
                        --i, v = _bundle->instructions[i]) \

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(v_block, v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(v_block, v)

#define mir_foreach_successor(blk, v) \
        struct midgard_block *v; \
        struct midgard_block **_v; \
        for (_v = &blk->successors[0], \
                v = *_v; \
                v != NULL && _v < &blk->successors[2]; \
                _v++, v = *_v) \

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)

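/* Illustrative sketch (hypothetical helper): count how many times a given
 * SSA/register index is read anywhere in the shader, using the iterators
 * above. The real use-count query is mir_use_count(), declared further down;
 * this only shows the shape of such a walk. */

static inline unsigned
mir_example_count_reads(compiler_context *ctx, unsigned node)
{
        unsigned count = 0;

        mir_foreach_instr_global(ctx, ins) {
                mir_foreach_src(ins, i) {
                        if (ins->src[i] == node)
                                ++count;
                }
        }

        return count;
}
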
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->instructions, struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        /* ctx->blocks is the list's sentinel head, so advance idx + 1 links
         * to land on the idx-th block */
        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}

static inline midgard_block *
mir_exit_block(struct compiler_context *ctx)
{
        midgard_block *last = list_last_entry(&ctx->blocks,
                        struct midgard_block, link);

        /* The last block must be empty logically but contains branch writeout
         * for fragment shaders */

        assert(last->nr_successors == 0);

        return last;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}

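/* Illustrative sketch (hypothetical helper): count the ALU bundles in a
 * scheduled block using the bundle iterator defined earlier. */

static inline unsigned
mir_example_count_alu_bundles(midgard_block *block)
{
        unsigned count = 0;

        mir_foreach_bundle_in_block(block, bundle) {
                if (mir_is_alu_bundle(bundle))
                        ++count;
        }

        return count;
}
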
/* Registers/SSA are distinguished in the backend by the bottom-most bit */

#define IS_REG (1)

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
}

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return (src->ssa->index << 1) | 0;
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | IS_REG;
        }
}

static inline unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}

static inline unsigned
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | IS_REG;
        }
}

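/* Illustrative sketch (hypothetical helpers): the encoding above tags NIR
 * registers with the low IS_REG bit and shifts the NIR index up by one.
 * Splitting an index back apart therefore looks like: */

static inline bool
mir_example_index_is_reg(unsigned idx)
{
        return idx & IS_REG;
}

static inline unsigned
mir_example_index_to_nir(unsigned idx)
{
        return idx >> 1;
}
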
/* MIR manipulation */

unsigned mir_get_swizzle(midgard_instruction *ins, unsigned idx);
void mir_set_swizzle(midgard_instruction *ins, unsigned idx, unsigned new);
void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
bool mir_special_index(compiler_context *ctx, unsigned idx);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
unsigned mir_mask_of_read_components(midgard_instruction *ins, unsigned node);
unsigned mir_ubo_shift(midgard_load_store_op op);

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);

bool mir_nontrivial_source2_mod(midgard_instruction *ins);
bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
bool mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask);
bool mir_nontrivial_outmod(midgard_instruction *ins);

void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_flip(midgard_instruction *ins);

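/* Illustrative sketch (hypothetical pass body, not the driver's actual copy
 * propagation): if `mov` is a plain move from `src` to `dest` with no
 * nontrivial modifiers, the rewrite helpers above let every later reader be
 * pointed at the source, after which the move itself is dead. */

static inline void
mir_example_forward_move(compiler_context *ctx, midgard_instruction *mov,
                         unsigned dest, unsigned src)
{
        /* Bail if the move does something beyond copying bits */
        if (mir_nontrivial_source2_mod(mov) || mir_nontrivial_outmod(mov))
                return;

        /* Redirect all readers of the destination to the source... */
        mir_rewrite_index_src(ctx, dest, src);

        /* ...then drop the now-dead move */
        mir_remove_instruction(mov);
}
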
static const midgard_vector_alu_src blank_alu_src = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

static const midgard_vector_alu_src blank_alu_src_xxxx = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
};

static const midgard_scalar_alu_src blank_scalar_alu_src = {
};

/* Used for encoding the unused source of 1-op instructions */
static const midgard_vector_alu_src zero_alu_src = { 0 };

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
{
        midgard_instruction ins = {
                .src = { SSA_UNUSED, src, SSA_UNUSED },
                .alu = {
                        .op = midgard_alu_op_imov,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_int_wrap,
                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
                        .src2 = vector_alu_srco_unsigned(mod)
                },
        };

        return ins;
}

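/* Illustrative sketch (hypothetical helper): emit a move copying index
 * `src` into a fresh compiler temporary, combining v_mov() with the
 * emission helpers above. */

static inline unsigned
mir_example_emit_copy(compiler_context *ctx, unsigned src)
{
        unsigned temp = make_compiler_temp(ctx);
        midgard_instruction mov = v_mov(src, blank_alu_src, temp);

        emit_mir_instruction(ctx, mov);
        return temp;
}
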
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}

void schedule_program(compiler_context *ctx);

/* Register allocation */

/* Broad types of register classes so we can handle special
 * cases */

#define NR_REG_CLASSES 6

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_LDST27 2
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4
#define REG_CLASS_FRAGC 5

void mir_lower_special_reads(compiler_context *ctx);
struct ra_graph *allocate_registers(compiler_context *ctx, bool *spilled);
void install_registers(compiler_context *ctx, struct ra_graph *g);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
bool mir_has_multiple_writes(compiler_context *ctx, int src);

void mir_create_pipeline_registers(compiler_context *ctx);

midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count);

midgard_instruction *
        compiler_context *ctx,
        nir_src *indirect_offset,

emit_sysval_read(compiler_context *ctx, nir_instr *instr, signed dest_override, unsigned nr_components);

midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(unsigned op);

void emit_binary_bundle(
        compiler_context *ctx,
        midgard_bundle *bundle,
        struct util_dynarray *emission,

nir_undef_to_zero(nir_shader *shader);

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);

void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block);