/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"
struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* Number of bytes in a component of this register */
        unsigned size;
};
/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcsize, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset / srcsize;
        signed dst_comp = dst_offset / srcsize;

        unsigned max_component = (16 / srcsize) - 1;

        assert(reg_comp * srcsize == reg_offset);
        assert(dst_comp * srcsize == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
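/* Worked example of the math above: for a 32-bit source (srcsize = 4)
 * allocated at byte 8 of its register (reg_offset = 8) with dst_offset = 0,
 * reg_comp = 2, so an identity swizzle xyzw becomes zwww -- every component
 * is shifted up by two and clamped at the last valid component. */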
/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, midgard_reg_mode size)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .size = mir_bytes_for_mode(size)
        };

        return r;
}
/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, midgard_reg_mode size)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, size);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), size);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, size);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .size = mir_bytes_for_mode(size)
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->work_registers = MAX2(ctx->work_registers, r.reg);

        return r;
}
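/* Note on the packing above: LCRA solutions are byte addresses into the
 * register file, so for example solutions[reg] == 37 denotes r2 (37 / 16)
 * at byte offset 5 (37 & 0xF). */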
static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}
/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}
static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}
/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class (unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}
void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */
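        /* Illustrative example: if node 5 is read both by an ALU op and by a
         * load/store address, is_alur and is_ldst are both set, so a
         * collision is detected. We then emit a move to a fresh node 6 and
         * rewrite the load/store to read node 6 instead; node 5 stays in the
         * work class while node 6 can be assigned to the load/store class.
         * (Node numbers here are made up for the example.) */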
        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        midgard_instruction m = hazard_write ?
                                v_mov(idx, i) : v_mov(i, idx);

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;
                                }

                                if (hazard_write) {
                                        midgard_instruction *use = mir_next_op(pre_use);
                                        assert(use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), midgard_reg_mode_32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}
/* We register allocate after scheduling, so we need to ensure instructions
 * executing in parallel within a segment of a bundle don't clobber each
 * other's registers. This is mostly a non-issue thanks to scheduling, but
 * there are edge cases. In particular, after a register is written in a
 * segment, it interferes with anything reading. */
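/* Concretely: if, within one segment, some instruction writes node A while
 * another reads node B, the two execute in parallel and the read must still
 * observe the old value, so A and B are marked as interfering and will not
 * share bytes of a register. */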
static void
mir_compute_segment_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun,
                unsigned pivot,
                unsigned i)
{
        for (unsigned j = pivot; j < i; ++j) {
                mir_foreach_src(bun->instructions[j], s) {
                        if (bun->instructions[j]->src[s] >= ctx->temp_count)
                                continue;

                        for (unsigned q = pivot; q < i; ++q) {
                                if (bun->instructions[q]->dest >= ctx->temp_count)
                                        continue;

                                /* See dEQP-GLES2.functional.shaders.return.output_write_in_func_dynamic_fragment */

                                if (q >= j) {
                                        if (!(bun->instructions[j]->unit == UNIT_SMUL && bun->instructions[q]->unit == UNIT_VLUT))
                                                continue;
                                }

                                unsigned mask = mir_bytemask(bun->instructions[q]);
                                unsigned rmask = mir_bytemask_of_read_components(bun->instructions[j], bun->instructions[j]->src[s]);
                                lcra_add_node_interference(l, bun->instructions[q]->dest, mask, bun->instructions[j]->src[s], rmask);
                        }
                }
        }
}
static void
mir_compute_bundle_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun)
{
        if (!IS_ALU(bun->tag))
                return;

        bool old = bun->instructions[0]->unit >= UNIT_VADD;
        unsigned pivot = 0;

        for (unsigned i = 1; i < bun->instruction_count; ++i) {
                bool new = bun->instructions[i]->unit >= UNIT_VADD;

                if (old != new) {
                        mir_compute_segment_interference(ctx, l, bun, 0, i);
                        pivot = i;
                        break;
                }
        }

        mir_compute_segment_interference(ctx, l, bun, pivot, bun->instruction_count);
}
static void
mir_compute_interference(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, block) {
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->src[2];
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, blk) {
                uint16_t *live = mem_dup(blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i)
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                        }

                        /* Update live_in */
                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                mir_foreach_bundle_in_block(blk, bun)
                        mir_compute_bundle_interference(ctx, l, bun);

                free(live);
        }
}
/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 1, 8, 16, 5);

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK]  = 16 * 0;
        l->class_start[REG_CLASS_LDST]  = 16 * 26;
        l->class_start[REG_CLASS_TEXR]  = 16 * 28;
        l->class_start[REG_CLASS_TEXW]  = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST]  = 16 * 2;
        l->class_size[REG_CLASS_TEXR]  = 16 * 2;
        l->class_size[REG_CLASS_TEXW]  = 16 * 2;

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
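        /* With the layout above, LCRA solutions are byte addresses into the
         * register file: the work class starts at byte 0 and covers
         * work_count vec4 registers (r0 upward), load/store covers r26-r27
         * (bytes 416..447), and the texture classes cover r28-r29
         * (bytes 448..479). */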
        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && ins->alu.reg_mode == midgard_reg_mode_64) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = 3;
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], class);

                /* XXX: Ensure swizzles align the right way with more LCRA constraints? */
                if (ins->type == TAG_ALU_4 && ins->alu.reg_mode != midgard_reg_mode_32)
                        min_alignment[dest] = 3; /* (1 << 3) = 8 */

                if (ins->type == TAG_LOAD_STORE_4 && ins->load_64)
                        min_alignment[dest] = 3;

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2);
                lcra_restrict_range(l, i, (found_class[i] + 1) * 4);
        }

        free(found_class);
        free(min_alignment);

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes, one per class. */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);

                        /* Texture offsets need to be aligned to vec4, since
                         * the swizzle for x is forced to x in hardware, while
                         * the other components are free. TODO: Relax to 8 for
                         * half-registers if that ever occurs. */

                        //lcra_restrict_range(l, ins->src[3], 16);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
        }

        /* Mark writeout to r0, render target to r1.z, unknown to r1.w */
        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count) {
                        if (ins->writeout_depth)
                                l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_X * 4;
                        else if (ins->writeout_stencil)
                                l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_Y * 4;
                        else
                                l->solutions[ins->src[0]] = 0;
                }

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_W * 4;
        }

        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);
        return l;
}
/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_instruction *ins)
{
        switch (ins->type) {
        case TAG_ALU_4: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.size, dest_offset);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = ins->has_inline_constant;

                if (ins->has_inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */
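                        /* Worked example of the encoding below: an inline
                         * constant of 0x1234 gives src2_reg = 0x1234 >> 11 = 2,
                         * lower_11 = 0x234, imm = ((0x234 >> 8) & 0x7) |
                         * ((0x34) << 3) = 0x1A2, and alu.src2 = 0x1A2 << 2. */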
                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.size, dest_offset);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */
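                /* That is: a store encodes the value being written (src[0])
                 * in load_store.reg, while a load encodes its destination
                 * there; the address arguments are handled separately below
                 * via arg_1/arg_2. */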
                bool encodes_src = OP_IS_STORE(ins->load_store.op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;
                        offset_swizzle(ins->swizzle[0], src.offset, src.size, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));

                        ins->load_store.reg = dst.reg;
                        offset_swizzle(ins->swizzle[0], 0, 4, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                unsigned src2 = ins->src[1];
                unsigned src3 = ins->src[2];
                midgard_reg_mode m32 = midgard_reg_mode_32;

                if (src2 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src2, m32);
                        unsigned component = src.offset / src.size;
                        assert(component * src.size == src.offset);
                        ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
                }

                if (src3 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src3, m32);
                        unsigned component = src.offset / src.size;
                        assert(component * src.size == src.offset);
                        ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
                }

                break;
        }

        case TAG_TEXTURE_4: {
                if (ins->texture.op == TEXTURE_OP_BARRIER)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], mir_srcsize(ins, 2));
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], mir_srcsize(ins, 2));

                /* First, install the texture coordinate */
                ins->texture.in_reg_full = 1;
                ins->texture.in_reg_upper = 0;
                ins->texture.in_reg_select = coord.reg & 1;
                offset_swizzle(ins->swizzle[1], coord.offset, coord.size, 0);

                /* Next, install the destination */
                ins->texture.out_full = 1;
                ins->texture.out_upper = 0;
                ins->texture.out_reg_select = dest.reg & 1;
                offset_swizzle(ins->swizzle[0], 0, 4, dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        midgard_tex_register_select sel = {
                                .select = lod.reg & 1,
                                .full = 1,
                                .component = lod.offset / 4
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins->texture.bias = packed;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        unsigned x = offset.offset / 4;
                        unsigned y = x + 1;
                        unsigned z = x + 2;

                        /* Check range, TODO: half-registers */
                        assert(z < 4);

                        ins->texture.offset =
                                (1)                   | /* full */
                                (offset.reg & 1) << 1 | /* select */
                                (0 << 2)              | /* upper */
                                (x << 3)              | /* swizzle */
                                (y << 5)              | /* swizzle */
                                (z << 7);               /* swizzle */
                }

                break;
        }

        default:
                break;
        }
}
static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}
/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}
/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
                compiler_context *ctx,
                unsigned spill_node,
                unsigned spill_class,
                unsigned *spill_count)
{
        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, block) {
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        ins->dest = spill_index++;
                                        ins->no_spill |= (1 << spill_class);
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */
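        /* Rough illustration: if node 10 was spilled to TLS slot 3, a use
         * like "fadd 12, 10, 11" becomes "ld_scratch 20, slot 3" followed by
         * "fadd 12, 20, 11", where 20 is a fresh index breaking up the live
         * range (node numbers here are made up for the example). */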
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(read_bytemask, midgard_reg_mode_32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        /* Rewrite the use to read the spilled copy */
                        mir_rewrite_index_src_single(ins, spill_node, index);
                }
        }

        /* Reset hints */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}
/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);

                        if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                assert(0);
                        }

                        mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                l = allocate_registers(ctx, &spilled);
        } while(spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);
}