/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"
struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* Number of bytes in a component of this register */
        unsigned size;
};
/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcsize, unsigned dstsize, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset / srcsize;
        signed dst_comp = dst_offset / dstsize;

        unsigned max_component = (16 / srcsize) - 1;

        assert(reg_comp * srcsize == reg_offset);
        assert(dst_comp * dstsize == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
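
/* For instance, with a 32-bit source (srcsize = 4) allocated at byte offset
 * 8 of its register and an aligned destination (dst_offset = 0): reg_comp =
 * 8 / 4 = 2 and max_component = (16 / 4) - 1 = 3, so an identity swizzle
 * xyzw (0, 1, 2, 3) becomes zwww -- each component shifts up by two and
 * clamps at 3 so it never addresses past the 16-byte register. */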
/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, unsigned size)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .size = size
        };

        return r;
}
/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned size)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, size);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), size);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, size);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .size = size
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->work_registers = MAX2(ctx->work_registers, r.reg);

        return r;
}
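
/* Note the encoding: LCRA solutions are byte addresses into a flat file of
 * 16-byte registers, so e.g. a solution of 420 decodes as r26 (420 / 16) at
 * byte offset 4 (420 & 0xF) -- r26.y for a 32-bit value. */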
static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}
/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}
static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}
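
/* Summarizing the two checks: LDST values may only be read by load/store,
 * TEXR only by texture ops, TEXW by anything except load/store, and work
 * registers only by ALU; for writes, TEXW is texture-only while LDST and
 * WORK accept ALU or load/store producers. */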
/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}
void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses as needed */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        midgard_instruction m = hazard_write ?
                                v_mov(idx, i) : v_mov(i, idx);

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;
                                }

                                if (hazard_write) {
                                        midgard_instruction *use = mir_next_op(pre_use);
                                        assert(use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        idx = spill_idx++;
                                        m = v_mov(i, idx);
                                        m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), 32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}
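
/* As a sketch of the lowering: if index 5 is read by both an ALU op and a
 * store (alur and ldst collide), we emit mov 6, 5 and rewrite the store to
 * read 6, letting 5 stay in the work class while 6 is allocated to the
 * load/store class. */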
/* We register allocate after scheduling, so we need to ensure instructions
 * executing in parallel within a segment of a bundle don't clobber each
 * other's registers. This is mostly a non-issue thanks to scheduling, but
 * there are edge cases. In particular, after a register is written in a
 * segment, it interferes with anything reading. */

static void
mir_compute_segment_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun,
                unsigned pivot,
                unsigned i)
{
        for (unsigned j = pivot; j < i; ++j) {
                mir_foreach_src(bun->instructions[j], s) {
                        if (bun->instructions[j]->src[s] >= ctx->temp_count)
                                continue;

                        for (unsigned q = pivot; q < i; ++q) {
                                if (bun->instructions[q]->dest >= ctx->temp_count)
                                        continue;

                                /* See dEQP-GLES2.functional.shaders.return.output_write_in_func_dynamic_fragment */

                                if (q >= j) {
                                        if (!(bun->instructions[j]->unit == UNIT_SMUL && bun->instructions[q]->unit == UNIT_VLUT))
                                                continue;
                                }

                                unsigned mask = mir_bytemask(bun->instructions[q]);
                                unsigned rmask = mir_bytemask_of_read_components(bun->instructions[j], bun->instructions[j]->src[s]);
                                lcra_add_node_interference(l, bun->instructions[q]->dest, mask, bun->instructions[j]->src[s], rmask);
                        }
                }
        }
}
static void
mir_compute_bundle_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun)
{
        if (!IS_ALU(bun->tag))
                return;

        bool old = bun->instructions[0]->unit >= UNIT_VADD;
        unsigned pivot = 0;

        for (unsigned i = 1; i < bun->instruction_count; ++i) {
                bool new = bun->instructions[i]->unit >= UNIT_VADD;

                if (old != new) {
                        mir_compute_segment_interference(ctx, l, bun, 0, i);
                        pivot = i;
                        break;
                }
        }

        mir_compute_segment_interference(ctx, l, bun, pivot, bun->instruction_count);
}
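
/* Roughly: instructions with units below UNIT_VADD form the first parallel
 * segment of the bundle and the rest form the second, so we find the pivot
 * where the unit crosses that boundary and check interference within each
 * segment separately. */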
static void
mir_compute_interference(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->src[2];
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;
                uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i) {
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                                }
                        }

                        /* Update live_in */
                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                mir_foreach_bundle_in_block(blk, bun)
                        mir_compute_bundle_interference(ctx, l, bun);

                free(live);
        }
}
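
/* E.g. if t7 has bytes 0xF live just after an instruction that writes t3,
 * the loop above records that t3 and t7 may not overlap in those bytes.
 * Walking in reverse with mir_liveness_ins_update keeps `live` equal to the
 * live set immediately after each instruction visited. */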
/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 5);

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK] = 16 * 0;
        l->class_start[REG_CLASS_LDST] = 16 * 26;
        l->class_start[REG_CLASS_TEXR] = 16 * 28;
        l->class_start[REG_CLASS_TEXW] = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST] = 16 * 2;
        l->class_size[REG_CLASS_TEXR] = 16 * 2;
        l->class_size[REG_CLASS_TEXW] = 16 * 2;

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);

        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }
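
        /* Concretely, in byte addresses: WORK covers r0 up to r15 (16 bytes
         * per register), LDST is r26-r27 (bytes 416-447), and TEXR/TEXW
         * share r28-r29 (bytes 448-479). With MIDGARD_INTERPIPE_REG_ALIASING,
         * texture inputs alias the load/store registers and texture outputs
         * alias the start of the work file instead. */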
        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && ins->alu.reg_mode == midgard_reg_mode_64) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = 3;
                        }
                }

                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->load_store.op)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];
                                unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);

                                if (s < ctx->temp_count)
                                        min_alignment[s] = (size == 64) ? 3 : 2;
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                unsigned size = nir_alu_type_get_type_size(ins->dest_type);

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int comps1 = util_logbase2(ins->mask);

                int bytes = (comps1 + 1) * (size / 8);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], bytes);

                min_alignment[dest] =
                        (size == 16) ? 1 : /* (1 << 1) = 2-byte */
                        (size == 32) ? 2 : /* (1 << 2) = 4-byte */
                        (size == 64) ? 3 : /* (1 << 3) = 8-byte */
                        3; /* 8-bit todo */

                /* 16-bit values can't cross the xy/zw boundary */
                if (size == 16)
                        min_bound[dest] = 8;

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */
        }
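
        /* As an example of the sizing above: a vec3 of 32-bit values has
         * mask 0x7, so comps1 = util_logbase2(0x7) = 2 and bytes = 3 * 4 =
         * 12, with a 4-byte minimum alignment; a vectorized csel destination
         * is instead forced to full 16-byte (vec4) alignment since its
         * condition can't be swizzled. */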
        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
                                min_bound[i] ? min_bound[i] : 16);
                lcra_restrict_range(l, i, found_class[i]);
        }

        free(found_class);
        free(min_alignment);
        free(min_bound);
        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes (TODO) */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                }
        }
        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
        }
        /* Mark writeout to r0, render target to r1.z, unknown to r1.w */
        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count) {
                        if (ins->writeout_depth)
                                l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_X * 4;
                        else if (ins->writeout_stencil)
                                l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_Y * 4;
                        else
                                l->solutions[ins->src[0]] = 0;
                }

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_W * 4;
        }
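
        /* The solutions above are byte addresses: (16 * 1) + COMPONENT_Z * 4
         * = 24, i.e. byte 8 of r1, the 32-bit z component -- matching the
         * "render target to r1.z" convention noted above. Pre-seeding
         * l->solutions pins these nodes before the solver runs. */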
        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);
        return l;
}
/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_instruction *ins)
{
        unsigned src_size[MIR_SRC_COUNT];

        for (unsigned i = 0; i < MIR_SRC_COUNT; ++i)
                src_size[i] = MAX2(nir_alu_type_get_type_size(ins->src_types[i]) / 8, 1);

        unsigned dest_size = MAX2(nir_alu_type_get_type_size(ins->dest_type) / 8, 1);

        switch (ins->type) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_size[0]);
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_size[1]);
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_size);

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.size, dest.size, dest_offset);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = ins->has_inline_constant;

                if (ins->has_inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
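
                        /* E.g. inline_constant = 0x1234: src2_reg gets the
                         * high bits (0x1234 >> 11 = 2), while bits 8-10 of
                         * the low part land in the bottom three bits of imm
                         * with bits 0-7 packed above them, the reverse of
                         * what the disassembler decodes. */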
                } else {
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.size, dest.size, dest_offset);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }
        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->load_store.op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_size[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;
                        offset_swizzle(ins->swizzle[0], src.offset, src.size, 1, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_size);

                        ins->load_store.reg = dst.reg;
                        offset_swizzle(ins->swizzle[0], 0, 4, 4, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                unsigned src2 = ins->src[1];
                unsigned src3 = ins->src[2];

                if (src2 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src2, 4);
                        unsigned component = src.offset / src.size;
                        assert(component * src.size == src.offset);
                        ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
                }

                if (src3 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src3, 4);
                        unsigned component = src.offset / src.size;
                        assert(component * src.size == src.offset);
                        ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
                }

                break;
        }
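
        /* The component math above mirrors index_to_reg's byte addressing:
         * an argument allocated at byte offset 8 with 4-byte components
         * selects component 2 (.z), which midgard_ldst_reg packs together
         * with the register select into the arg_1/arg_2 fields. */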
        case TAG_TEXTURE_4: {
                if (ins->texture.op == TEXTURE_OP_BARRIER)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_size);
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_size[1]);
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_size[2]);
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_size[3]);

                /* First, install the texture coordinate */
                ins->texture.in_reg_select = coord.reg & 1;
                offset_swizzle(ins->swizzle[1], coord.offset, coord.size, dest.size, 0);

                /* Next, install the destination */
                ins->texture.out_reg_select = dest.reg & 1;
                offset_swizzle(ins->swizzle[0], 0, 4, dest.size,
                                dest_size == 2 ? dest.offset % 8 :
                                dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        midgard_tex_register_select sel = {
                                .select = lod.reg & 1,
                                .full = 1,
                                .component = lod.offset / 4
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins->texture.bias = packed;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        unsigned x = offset.offset / 4;
                        unsigned y = x + 1;
                        unsigned z = x + 2;

                        /* Check range, TODO: half-registers */
                        assert(z < 4);

                        ins->texture.offset =
                                (1)                   | /* full */
                                (offset.reg & 1) << 1 | /* select */
                                (0 << 2)              | /* upper */
                                (x << 3)              | /* swizzle */
                                (y << 5)              | /* swizzle */
                                (z << 7);               /* swizzle */
                }

                break;
        }

        default:
                break;
        }
}
static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}
/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}
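
/* A spill cost of -1 marks a node unspillable, so values produced by the
 * moves of a previous spill round are never chosen again, which keeps the
 * spill loop in mir_ra making forward progress. */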
/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
                compiler_context *ctx,
                unsigned spill_node,
                unsigned spill_class,
                unsigned *spill_count)
{
        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers, since we don't have a notion of special memory */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        ins->dest = spill_index++;
                                        ins->no_spill |= (1 << spill_class);
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);

                                if (!is_special)
                                        ctx->spills++;
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        /* Move for a special read */
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        /* TLS load */
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(read_bytemask, 32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        /* Rewrite to use */
                        mir_rewrite_index_src_single(ins, spill_node, index);

                        if (!is_special)
                                ctx->fills++;
                }
        }
        /* Reset hints */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}
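
/* Sketch of a TLS spill round: given `5 = fadd 1, 2 ... 6 = fmul 5, 5`,
 * spilling node 5 renames the definition to a fresh index, emits a scratch
 * store right after it, and emits a scratch load into another fresh index
 * just before the fmul, replacing one long live range with two short ones. */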
/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);

                        if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                return;
                        }

                        mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                if (l) {
                        lcra_free(l);
                        l = NULL;
                }

                l = allocate_registers(ctx, &spilled);
        } while(spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);

        lcra_free(l);
}