/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"
#include "util/u_memory.h"
/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide to 32-bits 10 ways
 *
 * vec4: xyzw
 * vec3: xyz, yzw
 * vec2: xy, yz, zw
 * vec1: x, y, z, w
 *
 * For each 64-bit register, we can subdivide similarly to 16-bit
 * (TODO: half-float RA, not that we support fp16 yet)
 */

#define WORK_STRIDE 10
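
/* A virtual register index therefore encodes both a physical register and a
 * subdivision: virt / WORK_STRIDE picks the physical register and
 * virt % WORK_STRIDE picks the slice (see index_to_reg below). For example,
 * virt = 23 decodes to r2 with subdivision 3, i.e. the xy pair of r2. */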
/* We have overlapping register classes for special registers, handled via
 * shadows */

#define SHADOW_R28 18
#define SHADOW_R29 19
/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF,                                    /* xyzw */
        0x7, 0x7 << 1,                          /* xyz */
        0x3, 0x3 << 1, 0x3 << 2,                /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3       /* x */
};
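
/* Two subdivisions of the same 128-bit register conflict exactly when their
 * masks overlap: for instance, xy (0x3) conflicts with x (0x1) but not with
 * zw (0xC), so the x and zw slices of one register can hold separate values. */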
static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};
/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}
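
/* For example: a value allocated to the yz pair has reg.mask = 0x6, so
 * shift = 1; a logical writemask of x (0x1) then composes to
 * ((0x6 >> 1) & 0x1) << 1 = 0x2, i.e. the hardware writes component y. */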
static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);

        /* Based on the register mask, we need to adjust over. E.g if we're
         * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
         * original first component (x). But to prevent duplicate shifting
         * (only applies to ALU -- mask param is set to xyzw out on L/S to
         * prevent changes), we have to account for the shift inherent to the
         * original writemask */

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}
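
/* Continuing the comment's example: writing to yz (dst.mask = 0x6) with an
 * ALU mask of xy (0x3) gives shift = 1, so the base swizzle xy__ moves up one
 * slot to _xy_ and the vacated lane is filled with the saved first component,
 * yielding xxy_. */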
/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}
/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, unsigned reg)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if (!g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        /* Apply shadow registers */

        if (phys >= SHADOW_R28 && phys <= SHADOW_R29)
                phys += 28 - SHADOW_R28;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */

        if (phys < 16)
                ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}
/* This routine creates a register set. Should be called infrequently since
 * it's slow and can be cached. For legibility, variables are named in terms of
 * work registers, although it is also used to create the register set for
 * special register allocation */

static void
add_shadow_conflicts(struct ra_regs *regs, unsigned base, unsigned shadow)
{
        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
                unsigned reg_a = (WORK_STRIDE * base) + a;

                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
                        unsigned reg_b = (WORK_STRIDE * shadow) + b;

                        ra_add_reg_conflict(regs, reg_a, reg_b);
                        ra_add_reg_conflict(regs, reg_b, reg_a);
                }
        }
}
static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
        int virtual_count = 32 * WORK_STRIDE;

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
                int work_vec4 = ra_alloc_reg_class(regs);
                int work_vec3 = ra_alloc_reg_class(regs);
                int work_vec2 = ra_alloc_reg_class(regs);
                int work_vec1 = ra_alloc_reg_class(regs);

                classes[4*c + 0] = work_vec1;
                classes[4*c + 1] = work_vec2;
                classes[4*c + 2] = work_vec3;
                classes[4*c + 3] = work_vec4;

                /* Special register classes have other register counts */

                unsigned count =
                        (c == REG_CLASS_WORK) ? work_count : 2;

                unsigned first_reg =
                        (c == REG_CLASS_LDST) ? 26 :
                        (c == REG_CLASS_TEXR) ? 28 :
                        (c == REG_CLASS_TEXW) ? SHADOW_R28 :
                        0;

                /* Add the full set of work registers */
                for (unsigned i = first_reg; i < (first_reg + count); ++i) {
                        int base = WORK_STRIDE * i;

                        /* Build a full set of subdivisions */
                        ra_class_add_reg(regs, work_vec4, base);
                        ra_class_add_reg(regs, work_vec3, base + 1);
                        ra_class_add_reg(regs, work_vec3, base + 2);
                        ra_class_add_reg(regs, work_vec2, base + 3);
                        ra_class_add_reg(regs, work_vec2, base + 4);
                        ra_class_add_reg(regs, work_vec2, base + 5);
                        ra_class_add_reg(regs, work_vec1, base + 6);
                        ra_class_add_reg(regs, work_vec1, base + 7);
                        ra_class_add_reg(regs, work_vec1, base + 8);
                        ra_class_add_reg(regs, work_vec1, base + 9);

                        /* Overlapping subdivisions of the same register
                         * conflict with each other */
                        for (unsigned a = 0; a < 10; ++a) {
                                unsigned mask1 = reg_type_to_mask[a];

                                for (unsigned b = 0; b < 10; ++b) {
                                        unsigned mask2 = reg_type_to_mask[b];

                                        if (mask1 & mask2)
                                                ra_add_reg_conflict(regs,
                                                                    base + a, base + b);
                                }
                        }
                }
        }

        /* We have duplicate classes */
        add_shadow_conflicts(regs, 28, SHADOW_R28);
        add_shadow_conflicts(regs, 29, SHADOW_R29);

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        return regs;
}
/* This routine gets a precomputed register set off the screen if it's able, or
 * otherwise it computes one on the fly */

static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
{
        assert(work_count >= 8);
        assert(work_count <= 16);

        unsigned index = work_count - 8;

        /* Find the reg set */
        struct ra_regs *cached = screen->regs[index];

        if (cached) {
                assert(screen->reg_classes[index]);
                *classes = screen->reg_classes[index];
                return cached;
        }

        /* Otherwise, create one */
        struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);

        /* Cache it and use it */
        screen->regs[index] = created;

        *classes = screen->reg_classes[index];
        return created;
}
/* Assign a (special) class, ensuring that it is compatible with whatever class
 * was already set */

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        /* Check that we're even a node */
        if (node >= SSA_FIXED_MINIMUM)
                return;

        /* First 4 are work, next 4 are load/store.. */
        unsigned current_class = classes[node] >> 2;

        /* Nothing to do if the class already matches */
        if (class == current_class)
                return;

        /* If we're changing, we haven't assigned a special class */
        assert(current_class == REG_CLASS_WORK);

        classes[node] &= 0x3;
        classes[node] |= (class << 2);
}
static void
force_vec4(unsigned *classes, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return;

        /* Force vec4 = 3 */
        classes[node] |= 0x3;
}
/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}
static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_TEXR:
                return false;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}
/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */
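
/* For example, if a temporary is read by both an ALU op and a load/store op,
 * the pass emits a mov from the temporary into a fresh copy and rewrites the
 * load/store to read the copy, so each index ends up with exactly one class. */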
static void
mark_node_class (unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}
static void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);
        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);
                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }
        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */

        unsigned spill_idx = ctx->temp_count;
        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr));

                if (!collision)
                        continue;
                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        midgard_instruction m = hazard_write ?
                                v_mov(idx, blank_alu_src, i) :
                                v_mov(i, blank_alu_src, idx);

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;
                                }

                                if (hazard_write) {
                                        midgard_instruction *use = mir_next_op(pre_use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        m = v_mov(i, blank_alu_src, idx);
                                        m.mask = mir_mask_of_read_components(pre_use, i);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(ldst);
        free(texr);
        free(texw);
}
/* Routines for liveness analysis */

static void
liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
{
        if (node >= max)
                return;

        live[node] |= mask;
}

static void
liveness_kill(uint8_t *live, unsigned node, unsigned max, unsigned mask)
{
        if (node >= max)
                return;

        live[node] &= ~mask;
}
/* Updates live_in for a single instruction */

static void
liveness_ins_update(uint8_t *live, midgard_instruction *ins, unsigned max)
{
        /* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */

        liveness_kill(live, ins->dest, max, ins->mask);

        mir_foreach_src(ins, src) {
                unsigned node = ins->src[src];
                unsigned mask = mir_mask_of_read_components(ins, node);

                liveness_gen(live, node, max, mask);
        }
}
/* live_out[s] = sum { p in succ[s] } ( live_in[p] ) */

static void
liveness_block_live_out(compiler_context *ctx, midgard_block *blk)
{
        mir_foreach_successor(blk, succ) {
                for (unsigned i = 0; i < ctx->temp_count; ++i)
                        blk->live_out[i] |= succ->live_in[i];
        }
}
/* Liveness analysis is a backwards-may dataflow analysis pass. Within a block,
 * we compute live_out from live_in. The intrablock pass is linear-time. It
 * returns whether progress was made. */

static bool
liveness_block_update(compiler_context *ctx, midgard_block *blk)
{
        bool progress = false;

        liveness_block_live_out(ctx, blk);

        uint8_t *live = mem_dup(blk->live_out, ctx->temp_count);

        mir_foreach_instr_in_block_rev(blk, ins)
                liveness_ins_update(live, ins, ctx->temp_count);

        /* To figure out progress, diff live_in */

        for (unsigned i = 0; (i < ctx->temp_count) && !progress; ++i)
                progress |= (blk->live_in[i] != live[i]);

        free(blk->live_in);
        blk->live_in = live;

        return progress;
}
/* Globally, liveness analysis uses a fixed-point algorithm based on a
 * worklist. We initialize a work list with the exit block. We iterate the work
 * list to compute live_in from live_out for each block on the work list,
 * adding the predecessors of the block to the work list if we made progress.
 */
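
/* Roughly, in pseudocode:
 *
 *      work_list = { exit block }
 *      while work_list is nonempty:
 *              pop a block, recompute its live_in from its live_out
 *              if its live_in changed, add its predecessors to work_list
 */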
static void
mir_compute_liveness(
                compiler_context *ctx,
                struct ra_graph *g)
{
        /* List of midgard_block */
        struct set *work_list;

        work_list = _mesa_set_create(ctx,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        /* Allocate the per-block liveness bitmaps */
        mir_foreach_block(ctx, block) {
                block->live_in = calloc(ctx->temp_count, 1);
                block->live_out = calloc(ctx->temp_count, 1);
        }

        /* Initialize the work list with the exit block */
        struct set_entry *cur;

        midgard_block *exit = mir_exit_block(ctx);
        cur = _mesa_set_add(work_list, exit);

        /* Iterate the work list */

        do {
                /* Pop off a block */
                midgard_block *blk = (struct midgard_block *) cur->key;
                _mesa_set_remove(work_list, cur);

                /* Update its liveness information */
                bool progress = liveness_block_update(ctx, blk);

                /* If we made progress, we need to process the predecessors */

                if (progress || (blk == exit)) {
                        mir_foreach_predecessor(blk, pred)
                                _mesa_set_add(work_list, pred);
                }
        } while((cur = _mesa_set_next_entry(work_list, NULL)) != NULL);
        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, blk) {
                uint8_t *live = calloc(ctx->temp_count, 1);

                mir_foreach_successor(blk, succ) {
                        for (unsigned i = 0; i < ctx->temp_count; ++i)
                                live[i] |= succ->live_in[i];
                }

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i)
                                        if (live[i])
                                                ra_add_node_interference(g, dest, i);
                        }

                        /* Update the running live set */
                        liveness_ins_update(live, ins, ctx->temp_count);
                }

                free(live);
        }

        /* Liveness is now consumed; free the per-block bitmaps */
        mir_foreach_block(ctx, blk) {
                free(blk->live_in);
                free(blk->live_out);
        }
}
/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct ra_graph *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
        unsigned *classes = NULL;
        struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);

        assert(regs != NULL);
        assert(classes != NULL);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
        /* Register class (as known to the Mesa register allocator) is actually
         * the product of both semantic class (work, load/store, texture..) and
         * size (vec2/vec3..). First, we'll go through and determine the
         * minimum size needed to hold values */
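
        /* The per-node encoding is (semantic_class << 2) | (size - 1): e.g. a
         * vec3 texture-read value ends up as (REG_CLASS_TEXR << 2) | 2, which
         * indexes the matching entry classes[4 * REG_CLASS_TEXR + 2] built in
         * create_register_set. */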
        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], class);
        }
        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes. */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        bool force_vec4_only = OP_IS_VEC4_ONLY(ins->load_store.op);

                        set_class(found_class, ins->src[0], REG_CLASS_LDST);
                        set_class(found_class, ins->src[1], REG_CLASS_LDST);
                        set_class(found_class, ins->src[2], REG_CLASS_LDST);

                        if (force_vec4_only) {
                                force_vec4(found_class, ins->dest);
                                force_vec4(found_class, ins->src[0]);
                                force_vec4(found_class, ins->src[1]);
                                force_vec4(found_class, ins->src[2]);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(found_class, ins->dest, REG_CLASS_TEXW);
                        set_class(found_class, ins->src[0], REG_CLASS_TEXR);
                        set_class(found_class, ins->src[1], REG_CLASS_TEXR);
                        set_class(found_class, ins->src[2], REG_CLASS_TEXR);
                }
        }
        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(found_class, ins->type, ins->dest));
                assert(check_read_class(found_class, ins->type, ins->src[0]));
                assert(check_read_class(found_class, ins->type, ins->src[1]));
                assert(check_read_class(found_class, ins->type, ins->src[2]));
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                ra_set_node_class(g, i, classes[class]);
        }

        mir_compute_liveness(ctx, g);

        if (!ra_allocate(g)) {
                *spilled = true;
        } else {
                *spilled = false;
        }

        /* Whether we were successful or not, report the graph so we can
         * compute spill nodes */

        return g;
}
/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        switch (ins->type) {
        case TAG_ALU_4: {
                /* Compact branches carry no register fields to install */
                if (ins->compact_branch)
                        break;

                struct phys_reg src1 = index_to_reg(ctx, g, ins->src[0]);
                struct phys_reg src2 = index_to_reg(ctx, g, ins->src[1]);
                struct phys_reg dest = index_to_reg(ctx, g, ins->dest);

                unsigned uncomposed_mask = ins->mask;
                ins->mask = compose_writemask(uncomposed_mask, dest);

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu, ins->mask);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = ins->has_inline_constant;

                if (ins->has_inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */
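
                        /* E.g. for an inline constant of 0x1234, the top five
                         * bits (0x2) land in src2_reg; the low bits split as
                         * ((0x234 >> 8) & 0x7) | (0x34 << 3) = 0x1A2, and
                         * src2 becomes 0x1A2 << 2. */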
                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                               mod2.swizzle, uncomposed_mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }
        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->load_store.op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, g, ins->src[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;

                        unsigned shift = __builtin_ctz(src.mask);
                        unsigned adjusted_mask = src.mask >> shift;
                        assert(((adjusted_mask + 1) & adjusted_mask) == 0);

                        unsigned new_swizzle = 0;
                        for (unsigned q = 0; q < 4; ++q) {
                                unsigned c = (ins->load_store.swizzle >> (2*q)) & 3;
                                new_swizzle |= (c + shift) << (2*q);
                        }

                        ins->load_store.swizzle = compose_swizzle(
                                                          new_swizzle, src.mask,
                                                          default_phys_reg(0), src);
                } else {
                        struct phys_reg src = index_to_reg(ctx, g, ins->dest);

                        ins->load_store.reg = src.reg;

                        ins->load_store.swizzle = compose_swizzle(
                                                          ins->load_store.swizzle, 0xF,
                                                          default_phys_reg(0), src);

                        ins->mask = compose_writemask(
                                            ins->mask, src);
                }

                /* We also follow up by actual arguments */

                unsigned src2 =
                        encodes_src ? ins->src[1] : ins->src[0];

                unsigned src3 =
                        encodes_src ? ins->src[2] : ins->src[1];

                if (src2 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, g, src2);
                        unsigned component = __builtin_ctz(src.mask);
                        ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
                }

                if (src3 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, g, src3);
                        unsigned component = __builtin_ctz(src.mask);
                        ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
                }

                break;
        }
        case TAG_TEXTURE_4: {
                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, g, ins->dest);
                struct phys_reg coord = index_to_reg(ctx, g, ins->src[0]);
                struct phys_reg lod = index_to_reg(ctx, g, ins->src[1]);

                assert(dest.reg == 28 || dest.reg == 29);
                assert(coord.reg == 28 || coord.reg == 29);

                /* First, install the texture coordinate */
                ins->texture.in_reg_full = 1;
                ins->texture.in_reg_upper = 0;
                ins->texture.in_reg_select = coord.reg - 28;
                ins->texture.in_reg_swizzle =
                        compose_swizzle(ins->texture.in_reg_swizzle, 0xF, coord, dest);

                /* Next, install the destination */
                ins->texture.out_full = 1;
                ins->texture.out_upper = 0;
                ins->texture.out_reg_select = dest.reg - 28;
                ins->texture.swizzle =
                        compose_swizzle(ins->texture.swizzle, dest.mask, dest, dest);
                ins->mask =
                        compose_writemask(ins->mask, dest);

                /* If there is a register LOD/bias, use it */
                if (ins->src[1] != ~0) {
                        midgard_tex_register_select sel = {
                                .select = lod.reg,
                                .full = 1,
                                .component = lod.swizzle & 3,
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins->texture.bias = packed;
                }

                break;
        }

        default:
                break;
        }
}
void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, g, ins);
}