/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H
#include "midgard_compile.h"
#include "midgard_ops.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"

#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"
#include "panfrost/util/lcra.h"
/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO         0
#define TARGET_BREAK        1
#define TARGET_CONTINUE     2
#define TARGET_DISCARD      3
#define TARGET_TILEBUF_WAIT 4
typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        int target_type;

        /* The actual target */
        union {
                int target_block;
                int target_break;
                int target_continue;
        };
} midgard_branch;
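
/* Illustrative sketch (hypothetical values, not part of this header): per the
 * TARGET_DISCARD comment above, a discard is represented as a branch whose
 * target_type is TARGET_DISCARD, so emitting one looks roughly like
 *
 *    midgard_instruction br = { ... };
 *    br.branch.target_type = TARGET_DISCARD;
 *    emit_mir_instruction(ctx, br);
 *
 * The remaining TARGET_* values name block/loop boundaries and are resolved
 * against the final block layout late in compilation. */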
#define PAN_WRITEOUT_C 1
#define PAN_WRITEOUT_Z 2
#define PAN_WRITEOUT_S 4
/* Generic in-memory data type representing a single logical instruction,
 * rather than a single instruction group. This is the preferred form for code
 * gen. Multiple midgard_instructions will later be combined during
 * scheduling, though this is not represented in this structure. Its format
 * bridges the low-level binary representation with the higher-level semantic
 * meaning.
 *
 * Notably, it allows registers to be specified as block-local SSA, for code
 * emitted before the register allocation pass.
 */
#define MIR_SRC_COUNT 4
#define MIR_VEC_COMPONENTS 16
typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* Instruction arguments represented as block-local SSA
         * indices, rather than registers. ~0 means unused. */
        unsigned src[MIR_SRC_COUNT];
        unsigned dest;

        /* vec16 swizzle, unpacked, per source */
        unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];

        /* Types! */
        nir_alu_type src_types[MIR_SRC_COUNT];
        nir_alu_type dest_type;

        /* Packing ops have non-32-bit dest types even though they functionally
         * work at the 32-bit level, use this as a signal to disable copyprop.
         * We maybe need synthetic pack ops instead. */
        bool is_pack;

        /* Modifiers, depending on type */
        union {
                struct {
                        bool src_abs[MIR_SRC_COUNT];
                        bool src_neg[MIR_SRC_COUNT];
                };

                struct {
                        bool src_shift[MIR_SRC_COUNT];
                };
        };

        /* Out of the union for csel (could maybe be fixed..) */
        bool src_invert[MIR_SRC_COUNT];

        /* If the op supports it */
        enum midgard_roundmode roundmode;

        /* For textures: should helpers execute this instruction (instead of
         * just helping with derivatives)? Should helpers terminate after? */
        bool helper_terminate;
        bool helper_execute;

        /* I.e. (1 << alu_bit) */
        int unit;

        bool has_constants;
        midgard_constants constants;
        uint16_t inline_constant;
        bool has_blend_constant;
        bool has_inline_constant;

        bool compact_branch;
        uint8_t writeout;
        bool last_writeout;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */
        uint16_t mask;

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself).
         * Bitmask of spilled classes */
        unsigned no_spill;

        /* Generic hint for intra-pass use */
        bool hint;

        /* During scheduling, the backwards dependency graph
         * (DAG). nr_dependencies is the number of unscheduled
         * instructions that must still be scheduled after
         * (before) this instruction. dependents are which
         * instructions need to be scheduled before (after) this
         * instruction. */
        unsigned nr_dependencies;
        BITSET_WORD *dependents;

        /* Use this in conjunction with `type` */
        unsigned op;

        /* This refers to midgard_outmod_float or midgard_outmod_int.
         * In case of an ALU op, use midgard_is_integer_out_op() to know which
         * one is used.
         * If it's a texture op, it's always midgard_outmod_float. */
        unsigned outmod;

        union {
                midgard_load_store_word load_store;
                midgard_texture_word texture;

                midgard_branch branch;
        };
} midgard_instruction;
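
/* Illustrative notes (a sketch, not normative): per the mask comment above, a
 * full 32-bit vec4 write is mask = 0xF, one bit per channel. An identity
 * swizzle maps lane i of each source to lane i of the destination; v_mov()
 * further down shows both conventions in a concrete designated initializer. */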
typedef struct midgard_block {
        pan_block base;

        bool scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Indicates this is a fixed-function fragment epilogue block */
        bool epilogue;

        /* Are helper invocations required by this block? */
        bool helpers_in;
} midgard_block;
typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
         * sadd, vadd, smul, vlut, branch) */
        int instruction_count;
        midgard_instruction *instructions[6];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        midgard_constants constants;
        bool has_blend_constant;
        bool last_writeout;
} midgard_bundle;
enum midgard_rt_id {
        MIDGARD_COLOR_RT0 = 0,
        MIDGARD_COLOR_RT1,
        MIDGARD_COLOR_RT2,
        MIDGARD_COLOR_RT3,
        MIDGARD_COLOR_RT4,
        MIDGARD_COLOR_RT5,
        MIDGARD_COLOR_RT6,
        MIDGARD_COLOR_RT7,
        MIDGARD_ZS_RT,
        MIDGARD_NUM_RTS,
};
typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Render target number for a keyed blend shader. Depends on is_blend */
        unsigned blend_rt;

        /* Index to precolour to r0 for an input blend colour */
        unsigned blend_input;

        /* Index to precolour to r2 for a dual-source blend colour */
        unsigned blend_src1;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* If there is a preset after-block, use it; otherwise emit_block will create one if NULL */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;
        int temp_count;
        int max_hash;

        /* Set of NIR indices that were already emitted as outmods */
        BITSET_WORD *already_emitted;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        unsigned quadword_count;

        /* Bitmask of valid metadata */
        unsigned metadata;

        /* Model-specific quirk set */
        unsigned quirks;

        /* Writeout instructions for each render target */
        midgard_instruction *writeout_branch[MIDGARD_NUM_RTS];

        struct panfrost_sysvals sysvals;
} compiler_context;
/* Per-block live_in/live_out */
#define MIDGARD_METADATA_LIVENESS (1 << 0)
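
/* Sketch of the intended usage (assumed, based on the flag above and the
 * liveness prototypes below): analyses set a bit in ctx->metadata once
 * computed, and consumers check it before relying on the analysis, e.g.
 *
 *    if (!(ctx->metadata & MIDGARD_METADATA_LIVENESS))
 *            mir_compute_liveness(ctx);
 *
 * Passes that mutate the IR call mir_invalidate_liveness() to clear it. */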
/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */
static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}
static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}
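
/* Illustrative sketch (hypothetical indices): emitting a register-to-register
 * move into the current block, using v_mov() defined further down:
 *
 *    midgard_instruction mov = v_mov(src_index, make_compiler_temp(ctx));
 *    emit_mir_instruction(ctx, mov);
 *
 * The instruction is copied to the heap by mir_upload_ins(), so the stack
 * temporary may safely go out of scope. */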
static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &tag->link);
        return u;
}
static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}
static inline midgard_instruction *
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}
static inline midgard_instruction *
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}
#define mir_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->base.instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
        util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)
#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
        midgard_instruction *v; \
        signed i = 0; \
        mir_foreach_bundle_in_block_rev(block, _bundle) \
                for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
                                i >= 0; \
                                --i, v = (i >= 0) ? _bundle->instructions[i] : NULL)
#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(((midgard_block *) v_block), v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(((midgard_block *) v_block), v)
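
/* Illustrative sketch (hypothetical pass, not part of this file): a typical
 * MIR pass walks every instruction with these iterators, using the _safe
 * variant when it may remove the instruction under the cursor:
 *
 *    bool progress = false;
 *
 *    mir_foreach_instr_global_safe(ctx, ins) {
 *            if (ins->op == midgard_alu_op_imov && ins->src[1] == ins->dest) {
 *                    mir_remove_instruction(ins);
 *                    progress = true;
 *            }
 *    }
 *
 * (The self-move test is just for illustration.) */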
/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->base.instructions, struct midgard_instruction, link);
}
static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}
static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}
static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}
static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}
static inline unsigned
nir_ssa_index(nir_ssa_def *ssa)
{
        return (ssa->index << 1) | 0;
}
static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return nir_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | PAN_IS_REG;
        }
}
static inline unsigned
nir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | PAN_IS_REG;
        }
}
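
/* Worked example of the index encoding (values hypothetical): the helpers
 * above fold NIR values into a single index space by shifting left one bit
 * and using the low bit to distinguish registers from SSA values. NIR ssa_10
 * becomes (10 << 1) | 0 = 20, while NIR register r10 becomes
 * (10 << 1) | PAN_IS_REG = 21. Compiler temporaries are numbered past
 * ssa_alloc/reg_alloc, so they never collide with real NIR indices. */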
/* MIR manipulation */
void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i);
uint16_t mir_from_bytemask(uint16_t bytemask, unsigned bits);
uint16_t mir_bytemask(midgard_instruction *ins);
uint16_t mir_round_bytemask_up(uint16_t mask, unsigned bits);
void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
signed mir_upper_override(midgard_instruction *ins, unsigned inst_size);
unsigned mir_components_for_type(nir_alu_type T);
unsigned max_bitsize_for_alu(midgard_instruction *ins);
midgard_reg_mode reg_mode_for_bitsize(unsigned bitsize);
void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle);
bool mir_nontrivial_outmod(midgard_instruction *ins);
void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);

void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);
/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .src = { ~0, src, ~0, ~0 },
                .src_types = { 0, nir_type_uint32 },
                .swizzle = SWIZZLE_IDENTITY,
                .dest = dest,
                .dest_type = nir_type_uint32,
                .op = midgard_alu_op_imov,
                .outmod = midgard_outmod_int_wrap
        };

        return ins;
}
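
/* Usage sketch (hypothetical indices): copying a value into a fresh
 * temporary, e.g. so a later pass can mutate the copy independently:
 *
 *    unsigned tmp = make_compiler_temp(ctx);
 *    emit_mir_instruction(ctx, v_mov(old, tmp));
 *
 * Note the source lives in src[1]; src[0] is unused (~0) for a move. */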
/* Broad types of register classes so we can handle special
 * registers */

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4
/* Like a move, but to thread local storage! */

static inline midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .dest_type = nir_type_uint32,
                .dest = ~0,
                .src = { ~0, ~0, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY_4,
                .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,

                /* For register spilling - to thread local storage */
                .load_store = {
                        .arg_1 = 0xEA,
                        .arg_2 = 0x1E,
                },

                /* If we spill an unspill, RA goes into an infinite loop */
                .no_spill = (1 << REG_CLASS_WORK)
        };

        ins.constants.u32[0] = byte;

        if (is_store) {
                ins.src[0] = srcdest;
                ins.src_types[0] = nir_type_uint32;

                /* Ensure we are tightly swizzled so liveness analysis is
                 * correct */

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                ins.swizzle[0][i] = COMPONENT_X;
                }
        } else {
                ins.dest = srcdest;
        }

        return ins;
}
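
/* Usage sketch (hypothetical node/slot numbers): spilling node 5 to TLS
 * slot 0 and reloading it later, as the register allocator might:
 *
 *    midgard_instruction st = v_load_store_scratch(5, 0, true, 0xF);
 *    midgard_instruction ld = v_load_store_scratch(5, 0, false, 0xF);
 *
 * The store reads srcdest as its source; the load writes it as its
 * destination. Slot 0 maps to byte offset 0, since slots are 16-byte vec4s. */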
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        if (!ins)
                return false;

        mir_foreach_src(ins, i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}
void midgard_schedule_program(compiler_context *ctx);

void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
void mir_liveness_ins_update(uint16_t *live, midgard_instruction *ins, unsigned max);
void mir_compute_liveness(compiler_context *ctx);
void mir_invalidate_liveness(compiler_context *ctx);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void midgard_promote_uniforms(compiler_context *ctx);
void midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

void midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);
bool mir_op_computes_derivatives(gl_shader_stage stage, unsigned op);

void mir_analyze_helper_terminate(compiler_context *ctx);
void mir_analyze_helper_requirements(compiler_context *ctx);
void emit_binary_bundle(
        compiler_context *ctx,
        midgard_block *block,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);

bool nir_undef_to_zero(nir_shader *shader);
bool nir_fuse_io_16(nir_shader *shader);

void midgard_nir_lod_errata(nir_shader *shader);
unsigned midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx);
bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);