/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"
struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* log2(bytes per component) for fast mul/div */
        unsigned shift;
};
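
/* Illustrative (hypothetical) values: { .reg = 2, .offset = 4, .shift = 1 }
 * names the 16-bit value starting at byte 4 of r2, i.e. component 4 >> 1 = 2
 * (".z") of the register viewed as a vector of 16-bit lanes. */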

/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset >> srcshift;
        signed dst_comp = dst_offset >> dstshift;

        unsigned max_component = (16 >> srcshift) - 1;

        assert(reg_comp << srcshift == reg_offset);
        assert(dst_comp << dstshift == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
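
/* Worked example (illustrative values): a 32-bit source (srcshift = 2)
 * allocated at reg_offset = 8 has reg_comp = 2 and max_component = 3, so with
 * dst_offset = 0 an identity .xyzw swizzle becomes .zwww -- every component
 * shifts up by two and clamps at w. */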

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, unsigned shift)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .shift = shift
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, shift);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, shift);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .shift = shift
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->work_registers = MAX2(ctx->work_registers, r.reg);

        return r;
}

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}

static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        midgard_instruction m = hazard_write ?
                                v_mov(idx, i) : v_mov(i, idx);

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;
                                }

                                if (hazard_write) {
                                        midgard_instruction *use = mir_next_op(pre_use);

                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        idx = spill_idx++;

                                        m = v_mov(i, idx);
                                        m.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                mir_bytemask_of_read_components(pre_use, i), 32), 32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}
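
/* Sketch of the lowering above (hypothetical indices): if node 5 is written
 * by an ALU op but read by both ALU and load/store instructions, the ldst
 * reads collide; a fresh index gets a v_mov(5, idx) emitted before each
 * load/store use and only those uses are rewritten, so 5 stays a plain work
 * register. */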

/* We register allocate after scheduling, so we need to ensure instructions
 * executing in parallel within a segment of a bundle don't clobber each
 * other's registers. This is mostly a non-issue thanks to scheduling, but
 * there are edge cases. In particular, after a register is written in a
 * segment, it interferes with anything reading. */

static void
mir_compute_segment_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun,
                unsigned pivot,
                unsigned i)
{
        for (unsigned j = pivot; j < i; ++j) {
                mir_foreach_src(bun->instructions[j], s) {
                        if (bun->instructions[j]->src[s] >= ctx->temp_count)
                                continue;

                        for (unsigned q = pivot; q < i; ++q) {
                                if (bun->instructions[q]->dest >= ctx->temp_count)
                                        continue;

                                /* See dEQP-GLES2.functional.shaders.return.output_write_in_func_dynamic_fragment */

                                if (q >= j) {
                                        if (!(bun->instructions[j]->unit == UNIT_SMUL && bun->instructions[q]->unit == UNIT_VLUT))
                                                continue;
                                }

                                unsigned mask = mir_bytemask(bun->instructions[q]);
                                unsigned rmask = mir_bytemask_of_read_components(bun->instructions[j], bun->instructions[j]->src[s]);
                                lcra_add_node_interference(l, bun->instructions[q]->dest, mask, bun->instructions[j]->src[s], rmask);
                        }
                }
        }
}

static void
mir_compute_bundle_interference(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_bundle *bun)
{
        if (!IS_ALU(bun->tag))
                return;

        bool old = bun->instructions[0]->unit >= UNIT_VADD;
        unsigned pivot = 0;

        for (unsigned i = 1; i < bun->instruction_count; ++i) {
                bool new = bun->instructions[i]->unit >= UNIT_VADD;

                if (old != new) {
                        mir_compute_segment_interference(ctx, l, bun, 0, i);
                        pivot = i;
                        break;
                }
        }

        mir_compute_segment_interference(ctx, l, bun, pivot, bun->instruction_count);
}
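
/* Roughly: ALU bundles execute in two stages, so instructions whose unit is
 * below UNIT_VADD and those at or above it form separate segments; writes and
 * reads within a segment interfere, modulo the SMUL/VLUT carve-out in the
 * segment walk above. */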

static void
mir_compute_interference(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->dest;
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;
                uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i)
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                        }

                        /* Update live_in */
                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                mir_foreach_bundle_in_block(blk, bun)
                        mir_compute_bundle_interference(ctx, l, bun);

                free(live);
        }
}
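
/* Interference here is byte-granular: e.g. (illustrative) an fp16 write to
 * .xy covers only bytes 0-3 of a register, so it can coexist with a value
 * allocated in the upper bytes provided their bytemasks never overlap. */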

static bool
mir_is_64(midgard_instruction *ins)
{
        if (nir_alu_type_get_type_size(ins->dest_type) == 64)
                return true;

        mir_foreach_src(ins, v) {
                if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
                        return true;
        }

        return false;
}

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start and the shader stage. By ABI we limit blend shaders
         * to 8 registers, should be lower XXX */
        int work_count = ctx->is_blend ? 8 :
                16 - MAX2((ctx->uniform_cutoff - 8), 0);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 5);

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK] = 16 * 0;
        l->class_start[REG_CLASS_LDST] = 16 * 26;
        l->class_start[REG_CLASS_TEXR] = 16 * 28;
        l->class_start[REG_CLASS_TEXW] = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST] = 16 * 2;
        l->class_size[REG_CLASS_TEXR] = 16 * 2;
        l->class_size[REG_CLASS_TEXW] = 16 * 2;

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
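
        /* Given the starts/sizes above (16 bytes per register), the classes
         * land on: work registers r0-r15 (less any claimed for uniforms),
         * load/store pipeline registers r26-r27, and texture pipeline
         * registers r28-r29, unless the aliasing quirk below folds the
         * texture classes onto those. */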

        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = 3;
                        }
                }

                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->load_store.op)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];
                                unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);

                                if (s < ctx->temp_count)
                                        min_alignment[s] = (size == 64) ? 3 : 2;
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                unsigned size = nir_alu_type_get_type_size(ins->dest_type);

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int comps1 = util_logbase2(ins->mask);

                int bytes = (comps1 + 1) * (size / 8);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], bytes);

                min_alignment[dest] =
                        (size == 16) ? 1 : /* (1 << 1) = 2-byte */
                        (size == 32) ? 2 : /* (1 << 2) = 4-byte */
                        (size == 64) ? 3 : /* (1 << 3) = 8-byte */
                        3; /* 8-bit todo */
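
                /* E.g. (illustrative) a vec3 fp32 destination: mask 0x7 gives
                 * comps1 = 2, bytes = 12, and min_alignment 2, i.e. 4-byte
                 * (one component) alignment. */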

                /* We can't cross xy/zw boundaries. TODO: vec8 can */
                if (size == 16)
                        min_bound[dest] = 8;

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op)) {
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */

                        /* LCRA assumes bound >= alignment */
                        min_bound[dest] = 16;
                }

                /* Since ld/st swizzles and masks are 32-bit only, we need them
                 * aligned to enable final packing */
                if (ins->type == TAG_LOAD_STORE_4)
                        min_alignment[dest] = MAX2(min_alignment[dest], 2);
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
                                min_bound[i] ? min_bound[i] : 16);
                lcra_restrict_range(l, i, found_class[i]);
        }

        free(found_class);
        free(min_alignment);
        free(min_bound);

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes (TODO) */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
        }

        /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
         * render target to r1.z, unknown to r1.w */
        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count)
                        l->solutions[ins->src[0]] = 0;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;

                if (ins->src[3] < ctx->temp_count)
                        l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->dest < ctx->temp_count)
                        l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
        }

        /* Precolour blend input to r0. Note writeout is necessarily at the end
         * and blend shaders are single-RT only so there is only a single
         * writeout block, so this cannot conflict with the writeout r0 (there
         * is no need to have an intermediate move) */

        if (ctx->blend_input != ~0) {
                assert(ctx->blend_input < ctx->temp_count);
                l->solutions[ctx->blend_input] = 0;
        }

        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);
        return l;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_instruction *ins)
{
        unsigned src_shift[MIR_SRC_COUNT];

        for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
                src_shift[i] =
                        util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
        }

        unsigned dest_shift =
                util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);

        switch (ins->type) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = ins->has_inline_constant;

                if (ins->has_inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
                } else {
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }
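
        /* Worked example of the inline-constant encoding above (illustrative):
         * inline_constant = 0x1234 gives src2_reg = 0x1234 >> 11 = 2,
         * lower_11 = 0x234, imm = 0x2 | (0x34 << 3) = 0x1A2, and
         * alu.src2 = 0x1A2 << 2 = 0x688. */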

        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->load_store.op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;
                        offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);

                        ins->load_store.reg = dst.reg;
                        offset_swizzle(ins->swizzle[0], 0, 2, 2, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                unsigned src2 = ins->src[1];
                unsigned src3 = ins->src[2];

                if (src2 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src2, 2);
                        unsigned component = src.offset >> src.shift;
                        assert(component << src.shift == src.offset);
                        ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
                }

                if (src3 != ~0) {
                        struct phys_reg src = index_to_reg(ctx, l, src3, 2);
                        unsigned component = src.offset >> src.shift;
                        assert(component << src.shift == src.offset);
                        ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
                }

                break;
        }
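
        /* E.g. (hypothetical) an argument allocated at byte offset 8 with
         * 32-bit components (shift 2) yields component 8 >> 2 = 2, so
         * midgard_ldst_reg encodes the .z lane of the selected register. */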

        case TAG_TEXTURE_4: {
                if (ins->texture.op == TEXTURE_OP_BARRIER)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);

                /* First, install the texture coordinate */
                ins->texture.in_reg_select = coord.reg & 1;
                offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);

                /* Next, install the destination */
                ins->texture.out_reg_select = dest.reg & 1;
                offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
                                dest_shift == 1 ? dest.offset % 8 :
                                dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        midgard_tex_register_select sel = {
                                .select = lod.reg & 1,
                                .full = 1,
                                .component = lod.offset / 4
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins->texture.bias = packed;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        unsigned x = offset.offset / 4;
                        unsigned y = x + 1;
                        unsigned z = x + 2;

                        /* Check range, TODO: half-registers */
                        assert(z < 4);

                        ins->texture.offset =
                                (1)                   | /* full */
                                (offset.reg & 1) << 1 | /* select */
                                (0 << 2)              | /* upper */
                                (x << 3)              | /* swizzle */
                                (y << 5)              | /* swizzle */
                                (z << 7);               /* swizzle */
                }

                break;
        }

        default:
                break;
        }
}

static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}

/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}

/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
                compiler_context *ctx,
                unsigned spill_node,
                unsigned spill_class,
                unsigned *spill_count)
{
        if (spill_class == REG_CLASS_WORK && ctx->is_blend)
                unreachable("Blend shader spilling is currently unimplemented");

        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        ins->dest = spill_index++;
                                        ins->no_spill |= (1 << spill_class);
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);

                                if (!is_special)
                                        ctx->spills++;
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        /* Move */
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        /* TLS load */
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                        read_bytemask, 32), 32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        /* Rewrite to use */
                        mir_rewrite_index_src_single(ins, spill_node, index);

                        if (!is_special)
                                ctx->fills++;
                }
        }

        /* Reset hints */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}
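
/* Sketch of a work-register spill (non-special class): every definition of
 * the spill node gets a TLS store inserted after it, every use gets a masked
 * TLS load into a fresh index inserted before it and is rewritten to read
 * that index, chopping one long live range into many short ones. */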

/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);

                        if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                return;
                        }

                        mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                if (l) {
                        lcra_free(l);
                        l = NULL;
                }

                l = allocate_registers(ctx, &spilled);
        } while(spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);

        lcra_free(l);
}