/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard.h"
#include "helpers.h"
#include "midgard_compile.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/set.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"
/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3
typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        int target_type;

        /* The actual target */
        int target_block;
} midgard_branch;
/* Instruction arguments represented as block-local SSA indices, rather than
 * registers. Negative values mean unused. */

typedef struct {
        int src0;
        int src1;
        int dest;

        /* src1 is -not- SSA but instead a 16-bit inline constant to be smudged
         * in. Only valid for ALU ops. */
        bool inline_constant;
} ssa_args;
/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
 * though this is not represented in this structure. Its format bridges
 * the low-level binary representation with the higher level semantic meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass.
 */
typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* If the register allocator has not run yet... */
        ssa_args ssa_args;

        /* Special fields for an ALU instruction */
        midgard_reg_info registers;

        /* I.e. (1 << alu_bit) */
        int unit;

        /* When emitting bundle, should this instruction have a break forced
         * before it? Used for r31 writes which are valid only within a single
         * bundle and *need* to happen as early as possible... this is a hack,
         * TODO remove when we have a scheduler */
        bool precede_break;

        bool has_constants;
        float constants[4];
        uint16_t inline_constant;
        bool has_blend_constant;

        bool compact_branch;
        bool writeout;
        bool prepacked_branch;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */
        uint16_t mask;

        /* For ALU ops only: set to true to invert (bitwise NOT) the
         * destination of an integer-out op. Not implemented in hardware but
         * allows more optimizations */
        bool invert;

        union {
                midgard_load_store_word load_store;
                midgard_vector_alu alu;
                midgard_texture_word texture;
                midgard_branch_extended branch_extended;
                uint16_t br_compact;

                /* General branch, rather than packed br_compact. Higher level
                 * than the other components */
                midgard_branch branch;
        };
} midgard_instruction;
typedef struct midgard_block {
        /* Link to next block. Must be first for mir_get_block */
        struct list_head link;

        /* List of midgard_instructions emitted for the current block */
        struct list_head instructions;

        bool is_scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Successors: always one forward (the block after us), maybe
         * one backwards (for a backward branch). No need for a second
         * forward, since graph traversal would get there eventually
         * anyway */
        struct midgard_block *successors[2];
        unsigned nr_successors;

        /* The successor pointers form a graph, and in the case of
         * complex control flow, this graph has cycles. To aid
         * traversal during liveness analysis, we have a visited?
         * boolean for passes to use as they see fit, provided they
         * clean up later */
        bool visited;
} midgard_block;
typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle */
        int instruction_count;
        midgard_instruction *instructions[5];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        float constants[4];
        bool has_blend_constant;
} midgard_bundle;
typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* The screen we correspond to */
        struct midgard_screen *screen;

        /* Is internally a blend shader? Depends on stage == MESA_SHADER_FRAGMENT */
        bool is_blend;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        midgard_block *initial_block;
        midgard_block *previous_source_block;
        midgard_block *final_block;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;
        int temp_count;
        int max_hash;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* Used for cont/last hinting. Increase when a tex op is added.
         * Decrease when a tex op is removed. */
        int texture_op_count;

        /* Mapping of texture register -> SSA index for unaliasing */
        int texture_index[2];

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */
        float alpha_ref;

        /* The index corresponding to the fragment output */
        unsigned fragment_output;

        /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
        unsigned sysvals[MAX_SYSVAL_COUNT];
        unsigned sysval_count;
        struct hash_table_u64 *sysval_to_id;
} compiler_context;
/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct midgard_instruction ins)
{
        midgard_instruction *heap = malloc(sizeof(ins));
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline void
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions);
}
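
/* For example, a pass can build an instruction by value and emit it; the
 * helper above copies it onto the heap and appends it to the current block:
 *
 *    midgard_instruction ins = v_mov(src, blank_alu_src, dest);
 *    emit_mir_instruction(ctx, ins);
 *
 * (v_mov is defined later in this header.) */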
static inline struct midgard_instruction *
mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}
static inline midgard_instruction *
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction *
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}
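
/* Beware: mir_prev_op/mir_next_op walk the intrusive list blindly, so at the
 * boundaries of a block they return the list sentinel cast to an instruction
 * rather than NULL; callers are responsible for bounds-checking against the
 * enclosing block. */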
#define mir_foreach_block(ctx, v) \
        list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)

#define mir_foreach_instr(ctx, v) \
        list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_safe(ctx, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(v_block, v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(v_block, v)
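
/* For example, the global iterators nest a per-block walk inside a block
 * walk, so shader-wide queries stay one-liners:
 *
 *    unsigned alu_count = 0;
 *    mir_foreach_instr_global(ctx, ins) {
 *            if (ins->type == TAG_ALU_4)
 *                    alu_count++;
 *    }
 */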
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->instructions, struct midgard_instruction, link);
}
static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        /* Walk idx+1 links forward from the list head; the final cast relies
         * on midgard_block.link being the first member */
        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}
static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}
/* Registers/SSA are distinguished in the backend by the bottom-most bit */

#define IS_REG (1)

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}
static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
}
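
/* Under this encoding, e.g., SSA index 5 maps to (5 << 1) | 0 = 10 while NIR
 * register 5 maps to (5 << 1) | 1 = 11, so the two index spaces never collide
 * and the bottom bit recovers which kind an index refers to. */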
static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return (src->ssa->index << 1) | 0;
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | IS_REG;
        }
}
static inline unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}
static inline unsigned
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | IS_REG;
        }
}
/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
bool mir_single_use(compiler_context *ctx, unsigned value);
bool mir_special_index(compiler_context *ctx, unsigned idx);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
unsigned mir_mask_of_read_components(midgard_instruction *ins, unsigned node);

/* MIR printing, for debugging */

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int);
bool mir_nontrivial_source2_mod(midgard_instruction *ins);
bool mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask);
bool mir_nontrivial_outmod(midgard_instruction *ins);
static const midgard_vector_alu_src blank_alu_src = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

static const midgard_vector_alu_src blank_alu_src_xxxx = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
};

static const midgard_scalar_alu_src blank_scalar_alu_src = {
        .full = true
};

/* Used for encoding the unused source of 1-op instructions */
static const midgard_vector_alu_src zero_alu_src = { 0 };
/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .ssa_args = {
                        .src0 = SSA_UNUSED_1,
                        .src1 = src,
                        .dest = dest,
                },
                .alu = {
                        .op = midgard_alu_op_imov,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_int_wrap,
                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
                        .src2 = vector_alu_srco_unsigned(mod)
                },
        };

        return ins;
}
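
/* Note imov is a one-source op: the live source rides in src1 (with its
 * swizzle/modifiers carried in the ALU src2 slot), while src0 is tied off
 * with SSA_UNUSED_1 and the ALU src1 slot takes zero_alu_src, matching the
 * "unused source" convention documented above. */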
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        if (ins->ssa_args.src0 == arg)
                return true;

        if (ins->ssa_args.src1 == arg && !ins->ssa_args.inline_constant)
                return true;

        return false;
}
/* Scheduling */

void schedule_program(compiler_context *ctx);
/* Register allocation */

/* Broad types of register classes so we can handle special
 * register allocation */

#define NR_REG_CLASSES 5

#define REG_CLASS_WORK   0
#define REG_CLASS_LDST   1
#define REG_CLASS_LDST27 2
#define REG_CLASS_TEXR   3
#define REG_CLASS_TEXW   4
void mir_lower_special_reads(compiler_context *ctx);
struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
void install_registers(compiler_context *ctx, struct ra_graph *g);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
bool mir_has_multiple_writes(compiler_context *ctx, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void
midgard_promote_uniforms(compiler_context *ctx, unsigned pressure);
void
emit_ubo_read(
        compiler_context *ctx,
        unsigned dest,
        unsigned offset,
        nir_src *indirect_offset,
        unsigned index);
void emit_binary_bundle(
        compiler_context *ctx,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);
/* NIR stuff. TODO: Move? Share? Something? */

bool
nir_undef_to_zero(nir_shader *shader);

void
nir_clamp_psiz(nir_shader *shader, float min_size, float max_size);
/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);

void midgard_lower_invert(compiler_context *ctx, midgard_block *block);

#endif