/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"

struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* log2(bytes per component) for fast mul/div */
        unsigned shift;
};

/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset >> srcshift;
        signed dst_comp = dst_offset >> dstshift;

        unsigned max_component = (16 >> srcshift) - 1;

        assert(reg_comp << srcshift == reg_offset);
        assert(dst_comp << dstshift == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
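
/* For example, with an identity swizzle { 0, 1, 2, 3 }, reg_offset = 8,
 * srcshift = 2 (32-bit components) and dst_offset = 0, we get reg_comp = 2 and
 * max_component = 3, so the swizzle becomes { 2, 3, 3, 3 }: an xyzw read is
 * shifted up to zwww within the physical register. */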

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, unsigned shift)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .shift = shift
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, shift);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, shift);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .shift = shift
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->work_registers = MAX2(ctx->work_registers, r.reg);

        return r;
}
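
/* For example, if LCRA solves a node to byte address 0x1A = 26, it decodes to
 * physical register 26 / 16 = 1, at byte offset 26 & 0xF = 10 within r1. */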

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool ASSERTED
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}

static bool ASSERTED
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}
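
/* In short: load/store sources must live in LDST-class nodes, texture sources
 * in TEXR-class nodes, and texture results in TEXW-class nodes (readable by
 * ALU but never by load/store), while only ALU and load/store instructions
 * may write work and load/store nodes. */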

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class (unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        mark_node_class(ldst, ins->src[3]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */
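
        /* For example, if an index is written by an ALU op and read both by
         * ALU ops and by a load/store op, the load/store read is the odd one
         * out: a "mov copy, index" is emitted right before the load/store use
         * and that use is rewritten to read the copy, leaving the ALU uses
         * untouched. */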

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses as needed */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;

                                        midgard_instruction m = v_mov(idx, i);
                                        m.dest_type = pre_use->dest_type;
                                        m.src_types[1] = m.dest_type;
                                        m.mask = pre_use->mask;

                                        midgard_instruction *use = mir_next_op(pre_use);
                                        assert(use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;

                                        idx = spill_idx++;

                                        midgard_instruction m = v_mov(i, idx);
                                        m.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                        mir_bytemask_of_read_components(pre_use, i), 32), 32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}

static void
mir_compute_interference(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->dest;
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */
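
        /* For instance, if "a = fadd b, c" is visited while x and y are still
         * live after it, a is recorded as interfering with both x and y, so
         * LCRA will never assign them overlapping bytes. */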

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;
                uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i) {
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                                }
                        }

                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                free(live);
        }
}

static bool
mir_is_64(midgard_instruction *ins)
{
        if (nir_alu_type_get_type_size(ins->dest_type) == 64)
                return true;

        mir_foreach_src(ins, v) {
                if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
                        return true;
        }

        return false;
}

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start and the shader stage. By ABI we limit blend shaders
         * to 8 registers, should be lower XXX */
        int work_count = ctx->is_blend ? 8 :
                16 - MAX2((ctx->uniform_cutoff - 8), 0);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Initialize LCRA. Allocate an extra node at the end for a precoloured
         * r1 for interference */

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count + 1, 5);
        unsigned node_r1 = ctx->temp_count;

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK] = 16 * 0;
        l->class_start[REG_CLASS_LDST] = 16 * 26;
        l->class_start[REG_CLASS_TEXR] = 16 * 28;
        l->class_start[REG_CLASS_TEXW] = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST] = 16 * 2;
        l->class_size[REG_CLASS_TEXR] = 16 * 2;
        l->class_size[REG_CLASS_TEXW] = 16 * 2;

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
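
        /* In byte addresses this lays the register file out as work registers
         * r0 up to r15 (bounded by work_count), load/store registers r26-r27,
         * and texture registers r28-r29, with TEXR and TEXW sharing the same
         * physical pair but kept as disjoint classes (presumably so a texture
         * input and a texture output are never packed into the same physical
         * register). */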

        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = 3;
                        }
                }

                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];
                                unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);

                                if (s < ctx->temp_count)
                                        min_alignment[s] = (size == 64) ? 3 : 2;
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                unsigned size = nir_alu_type_get_type_size(ins->dest_type);

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int comps1 = util_logbase2(ins->mask);

                int bytes = (comps1 + 1) * (size / 8);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], bytes);
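
                /* For example, a 32-bit destination written with mask 0b0111
                 * (xyz) gives comps1 = 2 and bytes = 3 * 4 = 12, so the node
                 * is sized as at least 12 bytes of a work register. */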

                min_alignment[dest] =
                        (size == 16) ? 1 : /* (1 << 1) = 2-byte */
                        (size == 32) ? 2 : /* (1 << 2) = 4-byte */
                        (size == 64) ? 3 : /* (1 << 3) = 8-byte */
                        3;

                /* We can't cross xy/zw boundaries. TODO: vec8 can */
                if (size == 16)
                        min_bound[dest] = 8;

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */

                        /* LCRA assumes bound >= alignment */
                        min_bound[dest] = 16;
                }

                /* Since ld/st swizzles and masks are 32-bit only, we need them
                 * aligned to enable final packing */
                if (ins->type == TAG_LOAD_STORE_4)
                        min_alignment[dest] = MAX2(min_alignment[dest], 2);
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
                                min_bound[i] ? min_bound[i] : 16);
                lcra_restrict_range(l, i, found_class[i]);
        }

        free(found_class);
        free(min_alignment);
        free(min_bound);

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes. */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);
                        set_class(l->class, ins->src[3], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                                lcra_restrict_range(l, ins->src[3], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
                assert(check_read_class(l->class, ins->type, ins->src[3]));
        }

        /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
         * render target to r1.z, unknown to r1.w */

        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count)
                        l->solutions[ins->src[0]] = 0;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;

                if (ins->src[3] < ctx->temp_count)
                        l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->dest < ctx->temp_count)
                        l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
        }
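
        /* These precoloured solutions are byte addresses: (16 * 1) selects r1
         * and COMPONENT_Z * 4 = 8 selects its third 32-bit lane, so for
         * instance the render-target source lands in r1.z and the depth
         * source in r1.x, matching the mapping described above. */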

        /* Destinations of instructions in a writeout block cannot be assigned
         * to r1 unless they are actually used as r1 from the writeout itself,
         * since the writes to r1 are special. A code sequence like:
         *
         *      sadd.fmov r1.x, [...]
         *      vadd.fadd r0, r1, r2
         *
         * will misbehave since the r1.x write will be interpreted as a
         * gl_FragDepth write so it won't show up correctly when r1 is read in
         * the following segment. We model this as interference.
         */

        l->solutions[node_r1] = (16 * 1);

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;

                mir_foreach_bundle_in_block(blk, v) {
                        /* We need at least a writeout and nonwriteout instruction */
                        if (v->instruction_count < 2)
                                continue;

                        /* Branches always come at the end */
                        midgard_instruction *br = v->instructions[v->instruction_count - 1];

                        if (!br->writeout)
                                continue;

                        for (signed i = v->instruction_count - 2; i >= 0; --i) {
                                midgard_instruction *ins = v->instructions[i];

                                if (ins->dest >= ctx->temp_count)
                                        continue;

                                bool used_as_r1 = (br->dest == ins->dest);

                                mir_foreach_src(br, s)
                                        used_as_r1 |= (s > 0) && (br->src[s] == ins->dest);

                                if (!used_as_r1)
                                        lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), node_r1, 0xFFFF);
                        }
                }
        }

        /* Precolour blend input to r0. Note writeout is necessarily at the end
         * and blend shaders are single-RT only so there is only a single
         * writeout block, so this cannot conflict with the writeout r0 (there
         * is no need to have an intermediate move) */

        if (ctx->blend_input != ~0) {
                assert(ctx->blend_input < ctx->temp_count);
                l->solutions[ctx->blend_input] = 0;
        }

        /* Same for the dual-source blend input/output, except here we use r2,
         * which is also set in the fragment shader. */

        if (ctx->blend_src1 != ~0) {
                assert(ctx->blend_src1 < ctx->temp_count);
                l->solutions[ctx->blend_src1] = (16 * 2);
                ctx->work_registers = MAX2(ctx->work_registers, 2);
        }

        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);
        return l;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
                compiler_context *ctx,
                struct lcra_state *l,
                midgard_instruction *ins)
{
        unsigned src_shift[MIR_SRC_COUNT];

        for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
                src_shift[i] =
                        util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
        }

        unsigned dest_shift =
                util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);
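
        /* E.g. a 32-bit type gives 32 / 8 = 4 bytes per component and a shift
         * of log2(4) = 2, while a 16-bit type gives a shift of 1. */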

        switch (ins->type) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
                if (!ins->has_inline_constant)
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
                if (ins->src[0] != ~0)
                        ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
                ins->dest = SSA_FIXED_REGISTER(dest.reg);
                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->src[0] = SSA_FIXED_REGISTER(src.reg);
                        offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);

                        ins->dest = SSA_FIXED_REGISTER(dst.reg);
                        offset_swizzle(ins->swizzle[0], 0, 2, 2, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                for (int i = 1; i <= 3; i++) {
                        unsigned src_index = ins->src[i];
                        if (src_index != ~0) {
                                struct phys_reg src = index_to_reg(ctx, l, src_index, src_shift[i]);
                                unsigned component = src.offset >> src.shift;
                                assert(component << src.shift == src.offset);
                                ins->src[i] = SSA_FIXED_REGISTER(src.reg);
                                ins->swizzle[i][0] += component;
                        }
                }

                break;
        }
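
        /* The argument swizzle arithmetic mirrors index_to_reg: e.g. a 32-bit
         * argument allocated at byte offset 8 of its register has shift 2, so
         * component = 8 >> 2 = 2 and its swizzle is bumped to start at .z. */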

        case TAG_TEXTURE_4: {
                if (ins->op == TEXTURE_OP_BARRIER)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);

                /* First, install the texture coordinate */
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
                offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);

                /* Next, install the destination */
                ins->dest = SSA_FIXED_REGISTER(dest.reg);
                offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
                                dest_shift == 1 ? dest.offset % 8 :
                                dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
                        ins->swizzle[2][0] = lod.offset / 4;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
                        ins->swizzle[3][0] = offset.offset / 4;
                }

                break;
        }

        default:
                break;
        }
}

static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}

/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
                compiler_context *ctx,
                struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}
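
/* A spill cost of -1 presumably tells LCRA that a node must never be picked,
 * so lcra_get_best_spill_node() only considers nodes that are still legal to
 * spill for the failing class. */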

/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
                compiler_context *ctx,
                unsigned spill_node,
                unsigned spill_class,
                unsigned *spill_count)
{
        if (spill_class == REG_CLASS_WORK && ctx->is_blend)
                unreachable("Blend shader spilling is currently unimplemented");

        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                /* Note: it's important to match the mask of the spill
                                 * with the mask of the instruction whose destination
                                 * we're spilling, or otherwise we'll read invalid
                                 * components and can fail RA in a subsequent iteration
                                 */

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                        st.mask = ins->mask;
                                        st.dest_type = st.src_types[0] = ins->dest_type;
                                } else {
                                        ins->dest = spill_index++;
                                        ins->no_spill |= (1 << spill_class);
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                read_bytemask, 32), 32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        mir_rewrite_index_src_single(ins, spill_node, index);
                }
        }

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}
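
/* Taken together, spilling a work-register node roughly rewrites
 *
 *      node = op ...            tmp0 = op ...
 *                               store tmp0 -> scratch[slot]
 *      ...                 =>   ...
 *      use(node)                load tmp1 <- scratch[slot]
 *                               use(tmp1)
 *
 * breaking the long live range of "node" into short ranges around each use,
 * while spilling a special (TEXW) node inserts plain moves through a work
 * register instead of going to memory. (Illustrative sketch only.) */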

/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);

                        if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                lcra_free(l);
                                return;
                        }

                        mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                if (l) {
                        lcra_free(l);
                        l = NULL;
                }

                l = allocate_registers(ctx, &spilled);
        } while(spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);

        lcra_free(l);
}