/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"

/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide into 32-bit units 10 ways:
 * vec4: xyzw; vec3: xyz, yzw; vec2: xy, yz, zw; vec1: x, y, z, w.
 *
 * For each 64-bit register, we can subdivide similarly to 16-bit
 * (TODO: half-float RA, not that we support fp16 yet)
 */

#define WORK_STRIDE 10

/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF,                              /* xyzw */
        0x7, 0x7 << 1,                    /* xyz */
        0x3, 0x3 << 1, 0x3 << 2,          /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3 /* x */
};

static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

struct phys_reg {
        unsigned reg;
        unsigned mask;
        unsigned swizzle;
};

/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}
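
/* Illustrative trace: if a virtual register was packed into the yz slice of
 * a physical register (reg.mask = 0x6) and the instruction writes xy of the
 * virtual register (mask = 0x3), then shift = 1 and
 * ((0x6 >> 1) & 0x3) << 1 = 0x6, landing the write on yz. */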

static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);

        /* Based on the register mask, we need to adjust over. E.g. if we're
         * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
         * original first component (x). But to prevent duplicate shifting
         * (only applies to ALU -- mask param is set to xyzw out on L/S to
         * prevent changes), we have to account for the shift inherent to the
         * original writemask */

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}
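
/* Illustrative trace, assuming the composed swizzle comes out as the
 * identity xyzw (0xE4): writing yz (dst.mask = 0x6) for an original xy
 * writemask (mask = 0x3) gives shift = 1, so the swizzle becomes xxyz and
 * the y/z lanes read the values originally destined for x/y. */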

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
{
        /* Check for special cases */
        if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if ((reg < 0) || !g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        /* Apply shadow registers */

        if (phys == SHADOW_R27)
                phys = 27;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */

        if (phys < 16)
                ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}
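
/* Illustrative trace: a node assigned virtual register 53 decomposes as
 * phys = 53 / WORK_STRIDE = 5 and type = 53 % WORK_STRIDE = 3, i.e. the xy
 * half of r5 (reg_type_to_mask[3] = 0x3). */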

/* This routine creates a register set. Should be called infrequently since
 * it's slow and can be cached. For legibility, variables are named in terms of
 * work registers, although it is also used to create the register set for
 * special register allocation */

static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
        int virtual_count = 32 * WORK_STRIDE;

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
                int work_vec4 = ra_alloc_reg_class(regs);
                int work_vec3 = ra_alloc_reg_class(regs);
                int work_vec2 = ra_alloc_reg_class(regs);
                int work_vec1 = ra_alloc_reg_class(regs);

                classes[4*c + 0] = work_vec1;
                classes[4*c + 1] = work_vec2;
                classes[4*c + 2] = work_vec3;
                classes[4*c + 3] = work_vec4;

                /* Special register classes have other register counts */
                unsigned count =
                        (c == REG_CLASS_WORK)   ? work_count :
                        (c == REG_CLASS_LDST27) ? 1 : 2;

                /* We arbitrarily pick r17 (RA unused) as the shadow for r27 */
                unsigned first_reg =
                        (c == REG_CLASS_LDST)   ? 26 :
                        (c == REG_CLASS_LDST27) ? SHADOW_R27 :
                        (c == REG_CLASS_TEX)    ? 28 : 0;

                /* Add the full set of work registers */
                for (unsigned i = first_reg; i < (first_reg + count); ++i) {
                        int base = WORK_STRIDE * i;

                        /* Build a full set of subdivisions */
                        ra_class_add_reg(regs, work_vec4, base);
                        ra_class_add_reg(regs, work_vec3, base + 1);
                        ra_class_add_reg(regs, work_vec3, base + 2);
                        ra_class_add_reg(regs, work_vec2, base + 3);
                        ra_class_add_reg(regs, work_vec2, base + 4);
                        ra_class_add_reg(regs, work_vec2, base + 5);
                        ra_class_add_reg(regs, work_vec1, base + 6);
                        ra_class_add_reg(regs, work_vec1, base + 7);
                        ra_class_add_reg(regs, work_vec1, base + 8);
                        ra_class_add_reg(regs, work_vec1, base + 9);

                        for (unsigned a = 0; a < 10; ++a) {
                                unsigned mask1 = reg_type_to_mask[a];

                                for (unsigned b = 0; b < 10; ++b) {
                                        unsigned mask2 = reg_type_to_mask[b];

                                        if (mask1 & mask2)
                                                ra_add_reg_conflict(regs,
                                                                    base + a, base + b);
                                }
                        }
                }
        }

        /* All of the r27 registers in LDST conflict with all of the
         * registers in LDST27 (pseudo/shadow register class) */

        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
                unsigned reg_a = (WORK_STRIDE * 27) + a;

                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
                        unsigned reg_b = (WORK_STRIDE * SHADOW_R27) + b;

                        ra_add_reg_conflict(regs, reg_a, reg_b);
                        ra_add_reg_conflict(regs, reg_b, reg_a);
                }
        }

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        return regs;
}
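
/* Illustrative trace of the conflict setup: within one 128-bit register, the
 * vec3 xyz subdivision (mask 0x7) overlaps the vec2 zw subdivision (mask
 * 0xC), since 0x7 & 0xC != 0, so those two virtual registers conflict; xy
 * (0x3) and zw (0xC) are disjoint and may be allocated simultaneously. */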

/* This routine gets a precomputed register set from the screen if it can, or
 * otherwise it computes one on the fly */

static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
{
        /* Bounds check */
        assert(work_count >= 8);
        assert(work_count <= 16);

        /* Compute index */
        unsigned index = work_count - 8;

        /* Find the reg set */
        struct ra_regs *cached = screen->regs[index];

        if (cached) {
                assert(screen->reg_classes[index]);
                *classes = screen->reg_classes[index];
                return cached;
        }

        /* Otherwise, create one */
        struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);

        /* Cache it and use it */
        screen->regs[index] = created;

        *classes = screen->reg_classes[index];
        return created;
}
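
/* Illustrative note: work_count is constrained to [8, 16], so index spans
 * [0, 8] and the screen caches at most nine distinct register sets, one per
 * possible work-register count. */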

/* Assign a (special) class, ensuring that it is compatible with whatever class
 * was already set */

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        /* Check that we're even a node */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return;

        /* First 4 are work, next 4 are load/store.. */
        unsigned current_class = classes[node] >> 2;

        /* Nothing to do if we're already assigned */
        if (class == current_class)
                return;

        /* An LDST27 node already satisfies a plain LDST use */
        if ((current_class == REG_CLASS_LDST27) && (class == REG_CLASS_LDST))
                return;

        /* If we're changing, we must not have already assigned a special
         * class; only work may be overridden (or LDST promoted to LDST27) */

        bool compat = current_class == REG_CLASS_WORK;
        compat |= (current_class == REG_CLASS_LDST) && (class == REG_CLASS_LDST27);

        assert(compat);

        classes[node] &= 0x3;
        classes[node] |= (class << 2);
}
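
/* Illustrative traces: a REG_CLASS_WORK node may take on any special class,
 * and REG_CLASS_LDST may be promoted to REG_CLASS_LDST27; any other
 * transition (e.g. texture to load/store) trips the assert above. */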

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_LDST:
        case REG_CLASS_LDST27:
                return (tag == TAG_LOAD_STORE_4);
        default:
                return (tag != TAG_LOAD_STORE_4);
        }
}
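
/* Illustrative trace: an ALU instruction (tag != TAG_LOAD_STORE_4) reading a
 * node classed REG_CLASS_LDST returns false here; the lowering prepass below
 * exists to rewrite such reads away before RA. */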

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
                BITSET_SET(bitfield, node);
}

static midgard_instruction *
mir_find_last_write(compiler_context *ctx, unsigned i)
{
        midgard_instruction *last_write = NULL;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                if (ins->ssa_args.dest == i)
                        last_write = ins;
        }

        return last_write;
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have */

        unsigned *alur = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(alur, ins->ssa_args.src0);
                        mark_node_class(alur, ins->ssa_args.src1);
                        break;
                case TAG_LOAD_STORE_4:
                        mark_node_class(ldst, ins->ssa_args.src0);
                        mark_node_class(ldst, ins->ssa_args.src1);
                        break;
                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->ssa_args.src0);
                        mark_node_class(texr, ins->ssa_args.src1);
                        mark_node_class(texw, ins->ssa_args.dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst)) ||
                        (is_texw && (is_ldst));

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                if (is_ldst) {
                        unsigned idx = spill_idx++;
                        midgard_instruction m = v_mov(i, blank_alu_src, idx);
                        midgard_instruction *use = mir_next_op(mir_find_last_write(ctx, i));

                        mir_insert_instruction_before(use, m);

                        mir_rewrite_index_src_tag(ctx, i, idx, TAG_LOAD_STORE_4);
                }
        }

        free(alur);
        free(ldst);
        free(texr);
        free(texw);
}
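
/* Illustrative trace: if temp 7 is read by both ALU and load/store
 * instructions, the pass keeps temp 7 for the ALU reads, emits a move from
 * temp 7 to a fresh index right after temp 7's last write, and rewrites only
 * the load/store reads to the fresh index, so each node lands in one class. */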

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

struct ra_graph *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
        unsigned *classes = NULL;
        struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);

        assert(regs != NULL);
        assert(classes != NULL);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);

        /* Register class (as known to the Mesa register allocator) is actually
         * the product of both semantic class (work, load/store, texture..) and
         * size (vec2/vec3..). First, we'll go through and determine the
         * minimum size needed to hold values */

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->ssa_args.dest < 0) continue;
                if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->ssa_args.dest;
                found_class[dest] = MAX2(found_class[dest], class);
        }
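
        /* Illustrative trace: a vec3 write has mask 0x7 and
         * util_logbase2(0x7) = 2, selecting the vec3 size class; a later
         * full write (mask 0xF, class 3) widens the node to vec4. */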

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        bool force_r27 = OP_IS_R27_ONLY(ins->load_store.op);
                        unsigned class = force_r27 ? REG_CLASS_LDST27 : REG_CLASS_LDST;

                        set_class(found_class, ins->ssa_args.src0, class);
                        set_class(found_class, ins->ssa_args.src1, class);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Non-load-store cannot read load/store */
                assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
                assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                ra_set_node_class(g, i, classes[class]);
        }
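
        /* Illustrative note: found_class[i] packs (semantic << 2) | size,
         * which matches the 4*c + size layout used to fill classes[], so a
         * vec2 load/store node selects classes[4*REG_CLASS_LDST + 1]. */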

        /* Determine liveness */

        int *live_start = malloc(nodes * sizeof(int));
        int *live_end = malloc(nodes * sizeof(int));

        /* Initialize as non-existent */

        for (int i = 0; i < nodes; ++i) {
                live_start[i] = live_end[i] = -1;
        }

        int d = 0;

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
                                /* If this destination is not yet live, it is
                                 * now since we just wrote it */

                                int dest = ins->ssa_args.dest;

                                if (dest >= 0 && live_start[dest] == -1)
                                        live_start[dest] = d;
                        }

                        /* Since we just used a source, the source might be
                         * dead now. Scan the rest of the block for
                         * invocations, and if there are none, the source dies
                         * here */

                        int sources[2] = {
                                ins->ssa_args.src0, ins->ssa_args.src1
                        };

                        for (int src = 0; src < 2; ++src) {
                                int s = sources[src];

                                /* Skip the inline constant slot */
                                if (ins->ssa_args.inline_constant && src == 1)
                                        continue;

                                if (s < 0) continue;

                                if (s >= SSA_FIXED_MINIMUM) continue;

                                if (!mir_is_live_after(ctx, block, ins, s)) {
                                        live_end[s] = d;
                                }
                        }

                        ++d;
                }
        }

        /* If a node still hasn't been killed, kill it now */

        for (int i = 0; i < nodes; ++i) {
                /* live_start == -1 most likely indicates a pinned output */

                if (live_end[i] == -1)
                        live_end[i] = d;
        }
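
        /* Illustrative trace: a node first written at instruction 3 and last
         * read at instruction 9 gets live_start = 3 and live_end = 9; a node
         * that is never read keeps live_end = -1 until the loop above extends
         * it to d, the end of the shader. */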

        /* Setup interference between nodes that are live at the same time */

        for (int i = 0; i < nodes; ++i) {
                for (int j = i + 1; j < nodes; ++j) {
                        bool j_overlaps_i = live_start[j] < live_end[i];
                        bool i_overlaps_j = live_end[j] < live_start[i];

                        if (i_overlaps_j || j_overlaps_i)
                                ra_add_node_interference(g, i, j);
                }
        }

        free(live_start);
        free(live_end);

        if (!ra_allocate(g)) {
                *spilled = true;
        }

        /* Whether we were successful or not, report the graph so we can
         * compute spill nodes */

        return g;
}
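
/* Illustrative trace of the overlap test above: ranges [2, 5] and [4, 8]
 * interfere, since live_start[j] = 4 < live_end[i] = 5; disjoint ranges like
 * [2, 5] and [6, 9] add no edge, allowing the nodes to share a register. */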

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        ssa_args args = ins->ssa_args;

        switch (ins->type) {
        case TAG_ALU_4: {
                int adjusted_src = args.inline_constant ? -1 : args.src1;
                struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
                struct phys_reg src2 = index_to_reg(ctx, g, adjusted_src);
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);

                unsigned uncomposed_mask = ins->mask;
                ins->mask = compose_writemask(uncomposed_mask, dest);

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu, ins->mask);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = args.inline_constant;

                if (args.inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
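
                        /* Illustrative trace: inline_constant = 0x1234 gives
                         * src2_reg = 0x1234 >> 11 = 0x2, lower_11 = 0x234,
                         * imm = 0x2 | (0x34 << 3) = 0x1a2, and so
                         * src2 = 0x1a2 << 2 = 0x688. */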
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                mod2.swizzle, uncomposed_mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                bool fixed = args.src0 >= SSA_FIXED_MINIMUM;

                if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
                        ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
                } else if (OP_IS_STORE_VARY(ins->load_store.op)) {
                        struct phys_reg src = index_to_reg(ctx, g, args.src0);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;

                        /* TODO: swizzle/mask */
                } else {
                        /* Which physical register we read off depends on
                         * whether we are loading or storing -- think about the
                         * logical dataflow */

                        unsigned r = OP_IS_STORE(ins->load_store.op) ?
                                     args.src0 : args.dest;
                        struct phys_reg src = index_to_reg(ctx, g, r);

                        ins->load_store.reg = src.reg;

                        ins->load_store.swizzle = compose_swizzle(
                                ins->load_store.swizzle, 0xF,
                                default_phys_reg(0), src);

                        ins->mask = compose_writemask(
                                ins->mask, src);
                }

                break;
        }

        default:
                break;
        }
}

void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        install_registers_instr(ctx, g, ins);
                }
        }
}
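
/* Illustrative flow (caller side, sketch): allocate_registers builds and
 * colors the interference graph, setting *spilled on failure; the caller is
 * expected to insert spill code and retry in that case, and otherwise hand
 * the graph to install_registers to rewrite the MIR. */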