pan/midgard: Sync midgard_block field names with Bifrost
[mesa.git] / src / panfrost / midgard / compiler.h
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard.h"
#include "helpers.h"
#include "midgard_compile.h"
#include "midgard_ops.h"
#include "lcra.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/set.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"

/* Forward declare */
struct midgard_block;

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3

typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        unsigned target_type;

        /* The actual target */
        union {
                int target_block;
                int target_break;
                int target_continue;
        };
} midgard_branch;
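
/* Illustrative sketch only: one way a conditional break out of the current
 * loop might be described at this level. The field values (and the use of
 * ctx->current_loop_depth as the break target) are an example, not lifted
 * from real emitted code:
 *
 *    midgard_branch br = {
 *            .conditional = true,
 *            .invert_conditional = false,
 *            .target_type = TARGET_BREAK,
 *            .target_break = ctx->current_loop_depth,
 *    };
 */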

/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
 * though this is not represented in this structure. Its format bridges
 * the low-level binary representation with the higher level semantic meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass.
 */

#define MIR_SRC_COUNT 4
#define MIR_VEC_COMPONENTS 16

typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* Instruction arguments represented as block-local SSA
         * indices, rather than registers. ~0 means unused. */
        unsigned src[MIR_SRC_COUNT];
        unsigned dest;

        /* vec16 swizzle, unpacked, per source */
        unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];

        /* Special fields for an ALU instruction */
        midgard_reg_info registers;

        /* I.e. (1 << alu_bit) */
        int unit;

        bool has_constants;
        midgard_constants constants;
        uint16_t inline_constant;
        bool has_blend_constant;
        bool has_inline_constant;

        bool compact_branch;
        bool writeout;
        bool writeout_depth;
        bool writeout_stencil;
        bool last_writeout;

        /* Kind of a hack, but hint against aggressive DCE */
        bool dont_eliminate;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */

        uint16_t mask;

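        /* For example, with a 32-bit type a mask of 0xF writes all of xyzw
         * (as v_mov below does), while 0x3 writes only the x and y channels. */
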
        /* For ALU ops only: set to true to invert (bitwise NOT) the
         * destination of an integer-out op. Not implemented in hardware but
         * allows more optimizations */

        bool invert;

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself).
         * Bitmask of spilled classes */

        unsigned no_spill;

        /* Generic hint for intra-pass use */
        bool hint;

        /* During scheduling, the backwards dependency graph
         * (DAG). nr_dependencies is the number of unscheduled
         * instructions that must still be scheduled after
         * (before) this instruction. dependents are which
         * instructions need to be scheduled before (after) this
         * instruction. */

        unsigned nr_dependencies;
        BITSET_WORD *dependents;

        /* For load/store ops.. force 64-bit destination */
        bool load_64;

        union {
                midgard_load_store_word load_store;
                midgard_vector_alu alu;
                midgard_texture_word texture;
                midgard_branch_extended branch_extended;
                uint16_t br_compact;

                /* General branch, rather than packed br_compact. Higher level
                 * than the other components */
                midgard_branch branch;
        };
} midgard_instruction;

typedef struct midgard_block {
        /* Link to next block. Must be first for mir_get_block */
        struct list_head link;

        /* List of midgard_instructions emitted for the current block */
        struct list_head instructions;

        /* Index of the block in source order */
        unsigned name;

        bool scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Succeeding blocks. The compiler should not necessarily rely on
         * source-order traversal */
        struct midgard_block *successors[2];
        unsigned nr_successors;

        struct set *predecessors;

        /* In liveness analysis, these are live masks (per-component) for
         * indices for the block. Scalar compilers have the luxury of using
         * simple bit fields, but for us, liveness is a vector idea. */
        uint16_t *live_in;
        uint16_t *live_out;

        /* Indicates this is a fixed-function fragment epilogue block */
        bool epilogue;
} midgard_block;

typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
         * sadd, vadd, smul, vlut, branch) */
        int instruction_count;
        midgard_instruction *instructions[6];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        midgard_constants constants;
        bool has_blend_constant;
        bool last_writeout;
} midgard_bundle;

enum midgard_rt_id {
        MIDGARD_COLOR_RT0,
        MIDGARD_COLOR_RT1,
        MIDGARD_COLOR_RT2,
        MIDGARD_COLOR_RT3,
        MIDGARD_ZS_RT,
        MIDGARD_NUM_RTS,
};

typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Render target number for a keyed blend shader. Depends on is_blend */
        unsigned blend_rt;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* If there is a preset after block, use this, otherwise emit_block will create one if NULL */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;
        int temp_count;
        int max_hash;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* Used for cont/last hinting. Increase when a tex op is added.
         * Decrease when a tex op is removed. */
        int texture_op_count;

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */
        float alpha_ref;

        unsigned quadword_count;

        /* Bitmask of valid metadata */
        unsigned metadata;

        /* Model-specific quirk set */
        uint32_t quirks;

        /* Writeout instructions for each render target */
        midgard_instruction *writeout_branch[MIDGARD_NUM_RTS];

        struct panfrost_sysvals sysvals;
} compiler_context;

/* Per-block live_in/live_out */
#define MIDGARD_METADATA_LIVENESS (1 << 0)

/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &ctx->current_block->instructions);
        return u;
}

static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}

static inline midgard_instruction*
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction*
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}

#define mir_foreach_block(ctx, v) \
        list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
        util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
        midgard_instruction* v; \
        signed i = 0; \
        mir_foreach_bundle_in_block_rev(block, _bundle) \
                for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
                     i >= 0; \
                     --i, v = (i >= 0) ? _bundle->instructions[i] : NULL) \

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(v_block, v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(v_block, v)

#define mir_foreach_successor(blk, v) \
        struct midgard_block *v; \
        struct midgard_block **_v; \
        for (_v = &blk->successors[0], \
                v = *_v; \
                v != NULL && _v < &blk->successors[2]; \
                _v++, v = *_v) \

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)

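/* Sketch of how the iterators above compose: count the readers of a
 * hypothetical node, roughly what mir_use_count (declared below) computes:
 *
 *    unsigned uses = 0;
 *
 *    mir_foreach_instr_global(ctx, ins) {
 *            mir_foreach_src(ins, s) {
 *                    if (ins->src[s] == node)
 *                            ++uses;
 *            }
 *    }
 */
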
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->instructions, struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}

static inline midgard_block *
mir_exit_block(struct compiler_context *ctx)
{
        midgard_block *last = list_last_entry(&ctx->blocks,
                        struct midgard_block, link);

        /* The last block must be empty logically but contains branch writeout
         * for fragment shaders */

        assert(last->nr_successors == 0);

        return last;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}

/* Registers/SSA are distinguished in the backend by the bottom-most bit */

#define IS_REG (1)

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
}

static inline unsigned
nir_ssa_index(nir_ssa_def *ssa)
{
        return (ssa->index << 1) | 0;
}

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return nir_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | IS_REG;
        }
}

static inline unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}

static inline unsigned
nir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | IS_REG;
        }
}
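
/* For example, NIR ssa_5 maps to node 10 (5 << 1) while NIR register r5 maps
 * to node 11 ((5 << 1) | IS_REG), so the bottom bit alone identifies which
 * namespace a node index came from. */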


/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
bool mir_special_index(compiler_context *ctx, unsigned idx);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i);
midgard_reg_mode mir_typesize(midgard_instruction *ins);
midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
unsigned mir_bytes_for_mode(midgard_reg_mode mode);
midgard_reg_mode mir_mode_for_destsize(unsigned size);
uint16_t mir_from_bytemask(uint16_t bytemask, midgard_reg_mode mode);
uint16_t mir_bytemask(midgard_instruction *ins);
uint16_t mir_round_bytemask_up(uint16_t mask, midgard_reg_mode mode);
void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
unsigned mir_upper_override(midgard_instruction *ins);

/* MIR printing */

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_source2_mod(midgard_instruction *ins);
bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
bool mir_nontrivial_outmod(midgard_instruction *ins);

void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);

void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .src = { ~0, src, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY,
                .dest = dest,
                .alu = {
                        .op = midgard_alu_op_imov,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_int_wrap
                },
        };

        return ins;
}
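
/* Illustrative usage sketch (node names hypothetical): copy one node into
 * another at the end of the current block:
 *
 *    midgard_instruction mov = v_mov(src_node, dest_node);
 *    emit_mir_instruction(ctx, mov);
 */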

/* Broad types of register classes so we can handle special
 * registers */

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4

/* Like a move, but to thread local storage! */

static inline midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .dest = ~0,
                .src = { ~0, ~0, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY_4,
                .load_store = {
                        .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,

                        /* For register spilling - to thread local storage */
                        .arg_1 = 0xEA,
                        .arg_2 = 0x1E,
                },

                /* If we spill an unspill, RA goes into an infinite loop */
                .no_spill = (1 << REG_CLASS_WORK)
        };

        ins.constants.u32[0] = byte;

        if (is_store) {
                ins.src[0] = srcdest;

                /* Ensure we are tightly swizzled so liveness analysis is
                 * correct */

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                ins.swizzle[0][i] = COMPONENT_X;
                }
        } else
                ins.dest = srcdest;

        return ins;
}
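
/* Illustrative spill/fill sketch (to_spill, to_fill and spill_slot are
 * hypothetical names): store a work register out to TLS slot spill_slot,
 * then read it back into a fresh node later:
 *
 *    midgard_instruction st = v_load_store_scratch(to_spill, spill_slot, true, 0xF);
 *    midgard_instruction ld = v_load_store_scratch(to_fill, spill_slot, false, 0xF);
 */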

static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        if (!ins)
                return false;

        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}

/* Scheduling */

void midgard_schedule_program(compiler_context *ctx);

void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
void mir_liveness_ins_update(uint16_t *live, midgard_instruction *ins, unsigned max);
void mir_compute_liveness(compiler_context *ctx);
void mir_invalidate_liveness(compiler_context *ctx);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void midgard_promote_uniforms(compiler_context *ctx);

void
midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

void
midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(gl_shader_stage stage, unsigned op);

/* Final emission */

void emit_binary_bundle(
        compiler_context *ctx,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);

bool
nir_undef_to_zero(nir_shader *shader);

void midgard_nir_lod_errata(nir_shader *shader);

/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);

void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block);
bool midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_invert_branch(compiler_context *ctx, midgard_block *block);

#endif