/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_memory.h"
/* Scheduling for Midgard is complicated, to say the least. ALU instructions
 * must be grouped into VLIW bundles according to the following model:
 *
 * [VMUL] [SADD]
 * [VADD] [SMUL] [VLUT]
 *
 * A given instruction can execute on some subset of the units (or a few can
 * execute on all). Instructions can be either vector or scalar; only scalar
 * instructions can execute on SADD/SMUL units. Units on a given line execute
 * in parallel. Subsequent lines execute separately and can pass results
 * directly via pipeline registers r24/r25, bypassing the register file.
 *
 * A bundle can optionally have 128-bits of embedded constants, shared across
 * all of the instructions within a bundle.
 *
 * Instructions consuming conditionals (branches and conditional selects)
 * require their condition to be written into the conditional register (r31)
 * within the same bundle they are consumed.
 *
 * Fragment writeout requires its argument to be written in full within the
 * same bundle as the branch, with no hanging dependencies.
 *
 * Load/store instructions are likewise bundled, up to two per bundle, and
 * texture instructions have no bundling.
 *
 * -------------------------------------------------------------------------
 *
 */
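/* As a purely illustrative sketch of the model above (hypothetical, not real
 * MIR syntax), a fused multiply-add d = (a * b) + c could occupy a single
 * bundle:
 *
 *    VMUL:  r24 = a * b
 *    VADD:  d   = r24 + c
 *
 * The multiply result never touches the register file; the second line reads
 * it back through the pipeline register r24. */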
/* We create the dependency graph with per-byte granularity */

#define BYTE_COUNT 16
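/* Layout sketch for the tables built below (illustration only): the dynarray
 * tracking accesses to byte b of node n lives at
 *
 *    table[(BYTE_COUNT * n) + b]
 *
 * so one full vec4 write touches BYTE_COUNT consecutive cells. */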
static void
add_dependency(struct util_dynarray *table, unsigned index, uint16_t mask,
                midgard_instruction **instructions, unsigned child)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                struct util_dynarray *parents = &table[(BYTE_COUNT * index) + i];

                util_dynarray_foreach(parents, unsigned, parent) {
                        BITSET_WORD *dependents = instructions[*parent]->dependents;

                        /* Already have the dependency */
                        if (BITSET_TEST(dependents, child))
                                continue;

                        BITSET_SET(dependents, child);
                        instructions[child]->nr_dependencies++;
                }
        }
}
static void
mark_access(struct util_dynarray *table, unsigned index, uint16_t mask, unsigned parent)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                util_dynarray_append(&table[(BYTE_COUNT * index) + i], unsigned, parent);
        }
}
static void
mir_create_dependency_graph(midgard_instruction **instructions, unsigned count, unsigned node_count)
{
        size_t sz = node_count * BYTE_COUNT;

        struct util_dynarray *last_read = calloc(sizeof(struct util_dynarray), sz);
        struct util_dynarray *last_write = calloc(sizeof(struct util_dynarray), sz);

        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_init(&last_read[i], NULL);
                util_dynarray_init(&last_write[i], NULL);
        }

        /* Initialize dependency graph */
        for (unsigned i = 0; i < count; ++i) {
                instructions[i]->dependents =
                        calloc(BITSET_WORDS(count), sizeof(BITSET_WORD));

                instructions[i]->nr_dependencies = 0;
        }

        /* Populate dependency graph */
        for (signed i = count - 1; i >= 0; --i) {
                if (instructions[i]->compact_branch)
                        continue;

                unsigned dest = instructions[i]->dest;
                unsigned mask = mir_bytemask(instructions[i]);

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                add_dependency(last_write, src, readmask, instructions, i);
                        }
                }

                if (dest < node_count) {
                        add_dependency(last_read, dest, mask, instructions, i);
                        add_dependency(last_write, dest, mask, instructions, i);
                        mark_access(last_write, dest, mask, i);
                }

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                mark_access(last_read, src, readmask, i);
                        }
                }
        }

        /* If there is a branch, all instructions depend on it, as interblock
         * execution must be purely in-order */

        if (instructions[count - 1]->compact_branch) {
                BITSET_WORD *dependents = instructions[count - 1]->dependents;

                for (signed i = count - 2; i >= 0; --i) {
                        if (BITSET_TEST(dependents, i))
                                continue;

                        BITSET_SET(dependents, i);
                        instructions[i]->nr_dependencies++;
                }
        }

        /* Free the intermediate structures */
        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_fini(&last_read[i]);
                util_dynarray_fini(&last_write[i]);
        }

        free(last_read);
        free(last_write);
}
/* Does the mask cover more than a scalar? */

static bool
is_single_component_mask(unsigned mask)
{
        int components = 0;

        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        components++;
        }

        return components == 1;
}
/* Helpers for scheduling */

static bool
mir_is_scalar(midgard_instruction *ains)
{
        /* Do we try to use it as a vector op? */
        if (!is_single_component_mask(ains->mask))
                return false;

        /* Otherwise, check mode hazards */
        bool could_scalar = true;

        /* Only 16/32-bit can run on a scalar unit */
        could_scalar &= ains->alu.reg_mode != midgard_reg_mode_8;
        could_scalar &= ains->alu.reg_mode != midgard_reg_mode_64;
        could_scalar &= ains->alu.dest_override == midgard_dest_override_none;

        if (ains->alu.reg_mode == midgard_reg_mode_16) {
                /* If we're running in 16-bit mode, we
                 * can't have any 8-bit sources on the
                 * scalar unit (since the scalar unit
                 * doesn't understand 8-bit) */

                midgard_vector_alu_src s1 =
                        vector_alu_from_unsigned(ains->alu.src1);

                could_scalar &= !s1.half;

                midgard_vector_alu_src s2 =
                        vector_alu_from_unsigned(ains->alu.src2);

                could_scalar &= !s2.half;
        }

        return could_scalar;
}
/* How many bytes does this ALU instruction add to the bundle? */

static unsigned
bytes_for_instruction(midgard_instruction *ains)
{
        if (ains->unit & UNITS_ANY_VECTOR)
                return sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
        else if (ains->unit == ALU_ENAB_BRANCH)
                return sizeof(midgard_branch_extended);
        else if (ains->compact_branch)
                return sizeof(ains->br_compact);
        else
                return sizeof(midgard_reg_info) + sizeof(midgard_scalar_alu);
}
/* We would like to flatten the linked list of midgard_instructions in a bundle
 * to an array of pointers on the heap for easy indexing */

static midgard_instruction **
flatten_mir(midgard_block *block, unsigned *len)
{
        *len = list_length(&block->instructions);

        if (!(*len))
                return NULL;

        midgard_instruction **instructions =
                calloc(sizeof(midgard_instruction *), *len);

        unsigned i = 0;

        mir_foreach_instr_in_block(block, ins)
                instructions[i++] = ins;

        return instructions;
}
/* The worklist is the set of instructions that can be scheduled now; that is,
 * the set of instructions with no remaining dependencies */

static void
mir_initialize_worklist(BITSET_WORD *worklist, midgard_instruction **instructions, unsigned count)
{
        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->nr_dependencies == 0)
                        BITSET_SET(worklist, i);
        }
}
/* Update the worklist after an instruction terminates. Remove its edges from
 * the graph and if that causes any node to have no dependencies, add it to the
 * worklist */

static void
mir_update_worklist(
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions, midgard_instruction *done)
{
        /* Sanity check: if no instruction terminated, there is nothing to do.
         * If the instruction that terminated had dependencies, that makes no
         * sense and means we messed up the worklist. Finally, as the purpose
         * of this routine is to update dependents, we abort early if there are
         * no dependents defined. */

        if (!done)
                return;

        assert(done->nr_dependencies == 0);

        if (!done->dependents)
                return;

        /* We have an instruction with dependents. Iterate each dependent to
         * remove one dependency (`done`), adding dependents to the worklist
         * where possible. */

        unsigned i;
        BITSET_WORD tmp;

        BITSET_FOREACH_SET(i, tmp, done->dependents, count) {
                assert(instructions[i]->nr_dependencies);

                if (!(--instructions[i]->nr_dependencies))
                        BITSET_SET(worklist, i);
        }

        free(done->dependents);
}
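/* Together these routines give the usual list-scheduling driver shape,
 * sketched here in simplified form (the real loop is schedule_block below):
 *
 *    mir_initialize_worklist(worklist, instructions, len);
 *
 *    while (instructions remain) {
 *            midgard_instruction *ins = <pick a ready instruction>;
 *            <emit ins into the current bundle>;
 *            mir_update_worklist(worklist, len, instructions, ins);
 *    }
 */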
/* While scheduling, we need to choose instructions satisfying certain
 * criteria. As we schedule backwards, we choose the *last* instruction in the
 * worklist to simulate in-order scheduling. Chosen instructions must satisfy a
 * given predicate. */

struct midgard_predicate {
        /* TAG or ~0 for dont-care */
        unsigned tag;

        /* True if we want to pop off the chosen instruction */
        bool destructive;

        /* For ALU, choose only this unit */
        unsigned unit;

        /* State for bundle constants. constants is the actual constants
         * for the bundle. constant_count is the number of bytes (up to
         * 16) currently in use for constants. When picking in destructive
         * mode, the constants array will be updated, and the instruction
         * will be adjusted to index into the constants array */

        uint8_t *constants;
        unsigned constant_count;
        bool blend_constant;

        /* Exclude this destination (if not ~0) */
        unsigned exclude;

        /* Don't schedule instructions consuming conditionals (since we already
         * scheduled one). Excludes conditional branches and csel */
        bool no_cond;

        /* Require a minimal mask and (if nonzero) given destination. Used for
         * writeout optimizations */
        unsigned mask;
        unsigned dest;
};
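/* Illustrative use of the predicate (hypothetical values): to pull a
 * scalar-capable ALU op for the SADD slot, the scheduler below effectively
 * asks for
 *
 *    struct midgard_predicate predicate = {
 *            .tag = TAG_ALU_4,
 *            .unit = UNIT_SADD,
 *            .destructive = true,
 *            .exclude = ~0
 *    };
 *
 * mir_schedule_alu() fills one such predicate per bundle and mir_choose_alu()
 * retargets its .unit field for each slot it tries to fill. */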
/* For an instruction that can fit, adjust it to fit and update the constants
 * array, in destructive mode. Returns whether the fitting was successful. */

static bool
mir_adjust_constants(midgard_instruction *ins,
                struct midgard_predicate *pred,
                bool destructive)
{
        /* Blend constants dominate */
        if (ins->has_blend_constant) {
                if (pred->constant_count)
                        return false;
                else if (destructive) {
                        pred->blend_constant = true;
                        pred->constant_count = 16;
                }
        }

        /* No constant, nothing to adjust */
        if (!ins->has_constants)
                return true;

        if (ins->alu.reg_mode != midgard_reg_mode_32) {
                /* TODO: 16-bit constant combining */
                if (pred->constant_count)
                        return false;

                uint16_t *bundles = (uint16_t *) pred->constants;
                uint32_t *constants = (uint32_t *) ins->constants;

                /* Copy them wholesale */
                for (unsigned i = 0; i < 4; ++i)
                        bundles[i] = constants[i];

                pred->constant_count = 16;
        } else {
                /* Pack 32-bit constants */
                uint32_t *bundles = (uint32_t *) pred->constants;
                uint32_t *constants = (uint32_t *) ins->constants;
                unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                unsigned mask = mir_from_bytemask(mir_bytemask_of_read_components(ins, r_constant), midgard_reg_mode_32);

                /* First, check if it fits */
                unsigned count = DIV_ROUND_UP(pred->constant_count, sizeof(uint32_t));
                unsigned existing_count = count;

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                continue;

                        bool duplicate = false;

                        /* Look for existing constant */
                        for (unsigned j = 0; j < existing_count; ++j) {
                                if (bundles[j] == constants[i]) {
                                        duplicate = true;
                                        break;
                                }
                        }

                        if (duplicate)
                                continue;

                        /* If the constant is new, check ourselves */
                        for (unsigned j = 0; j < i; ++j) {
                                if (constants[j] == constants[i]) {
                                        duplicate = true;
                                        break;
                                }
                        }

                        if (duplicate)
                                continue;

                        /* Otherwise, this is a new constant */
                        count++;
                }

                /* Check if we have space */
                if (count > 4)
                        return false;

                /* If non-destructive, we're done */
                if (!destructive)
                        return true;

                /* If destructive, let's copy in the new constants and adjust
                 * swizzles to pack it in. */

                unsigned indices[16] = { 0 };

                /* Reset count so we can reuse it while packing */
                count = existing_count;

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                continue;

                        uint32_t cons = constants[i];
                        bool constant_found = false;

                        /* Search for the constant */
                        for (unsigned j = 0; j < count; ++j) {
                                if (bundles[j] != cons)
                                        continue;

                                /* We found it, reuse */
                                indices[i] = j;
                                constant_found = true;
                                break;
                        }

                        if (constant_found)
                                continue;

                        /* We didn't find it, so allocate it */
                        unsigned idx = count++;

                        /* We have space, copy it in! */
                        bundles[idx] = cons;
                        indices[i] = idx;
                }

                pred->constant_count = count * sizeof(uint32_t);

                /* Use indices as a swizzle */

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == r_constant)
                                mir_compose_swizzle(ins->swizzle[s], indices, ins->swizzle[s]);
                }
        }

        return true;
}
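/* Worked example with hypothetical values: if the bundle already holds the
 * 32-bit constants { 1.0, 2.0 } and an instruction reads { 2.0, 5.0 } through
 * .xy, the pass reuses slot 1 for 2.0, appends 5.0 as slot 2, bumps
 * constant_count to 12 bytes, and composes the constant source's swizzle so
 * its .x read now points at slot 1 and its .y read at slot 2. */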
static midgard_instruction *
mir_choose_instruction(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned count,
                struct midgard_predicate *predicate)
{
        /* Parse the predicate */
        unsigned tag = predicate->tag;
        bool alu = tag == TAG_ALU_4;
        unsigned unit = predicate->unit;
        bool branch = alu && (unit == ALU_ENAB_BR_COMPACT);
        bool scalar = (unit != ~0) && (unit & UNITS_SCALAR);
        bool no_cond = predicate->no_cond;

        unsigned mask = predicate->mask;
        unsigned dest = predicate->dest;
        bool needs_dest = mask & 0xF;

        /* Iterate to find the best instruction satisfying the predicate */
        unsigned i;
        BITSET_WORD tmp;

        signed best_index = -1;
        bool best_conditional = false;

        /* Enforce a simple metric limiting distance to keep down register
         * pressure. TODO: replace with liveness tracking for much better
         * results */

        unsigned max_active = 0;
        unsigned max_distance = 6;

        BITSET_FOREACH_SET(i, tmp, worklist, count) {
                max_active = MAX2(max_active, i);
        }

        BITSET_FOREACH_SET(i, tmp, worklist, count) {
                if ((max_active - i) >= max_distance)
                        continue;

                if (tag != ~0 && instructions[i]->type != tag)
                        continue;

                if (predicate->exclude != ~0 && instructions[i]->dest == predicate->exclude)
                        continue;

                if (alu && !branch && !(alu_opcode_props[instructions[i]->alu.op].props & unit))
                        continue;

                if (branch && !instructions[i]->compact_branch)
                        continue;

                if (alu && scalar && !mir_is_scalar(instructions[i]))
                        continue;

                if (alu && !mir_adjust_constants(instructions[i], predicate, false))
                        continue;

                if (needs_dest && instructions[i]->dest != dest)
                        continue;

                if (mask && ((~instructions[i]->mask) & mask))
                        continue;

                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->alu.op);
                conditional |= (branch && !instructions[i]->prepacked_branch && instructions[i]->branch.conditional);

                if (conditional && no_cond)
                        continue;

                /* Simulate in-order scheduling */
                if ((signed) i < best_index)
                        continue;

                best_index = i;
                best_conditional = conditional;
        }

        /* Did we find anything? */

        if (best_index < 0)
                return NULL;

        /* If we found something, remove it from the worklist */
        assert(best_index < count);

        if (predicate->destructive) {
                BITSET_CLEAR(worklist, best_index);

                if (alu)
                        mir_adjust_constants(instructions[best_index], predicate, true);

                /* Once we schedule a conditional, we can't again */
                predicate->no_cond |= best_conditional;
        }

        return instructions[best_index];
}
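/* Example of the distance heuristic above (illustrative numbers): with
 * max_distance = 6 and a highest ready index of 40, only ready instructions
 * with indices 35..40 are candidates this round, bounding how far past the
 * original program order the scheduler reaches and hence how long values are
 * kept live. */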
/* Still, we don't choose instructions in a vacuum. We need a way to choose the
 * best bundle type (ALU, load/store, texture). Nondestructive. */

static unsigned
mir_choose_bundle(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned count)
{
        /* At the moment, our algorithm is very simple - use the bundle of the
         * best instruction, regardless of what else could be scheduled
         * alongside it. This is not optimal but it works okay for in-order */

        struct midgard_predicate predicate = {
                .tag = ~0,
                .destructive = false,
                .exclude = ~0
        };

        midgard_instruction *chosen = mir_choose_instruction(instructions, worklist, count, &predicate);

        if (chosen)
                return chosen->type;
        else
                return ~0;
}
/* We want to choose an ALU instruction filling a given unit */

static void
mir_choose_alu(midgard_instruction **slot,
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len,
                struct midgard_predicate *predicate,
                unsigned unit)
{
        /* Did we already schedule to this slot? */
        if ((*slot) != NULL)
                return;

        /* Try to schedule something, if not */
        predicate->unit = unit;
        *slot = mir_choose_instruction(instructions, worklist, len, predicate);

        /* Store unit upon scheduling */
        if (*slot && !((*slot)->compact_branch))
                (*slot)->unit = unit;
}
/* When we are scheduling a branch/csel, we need the consumed condition in the
 * same block as a pipeline register. There are two options to enable this:
 *
 * - Move the conditional into the bundle. Preferred, but only works if the
 *   conditional is used only once and is from this block.
 * - Copy the conditional.
 *
 * We search for the conditional. If it's in this block, single-use, and
 * without embedded constants, we schedule it immediately. Otherwise, we
 * schedule a move for it.
 *
 * mir_comparison_mobile is a helper to find the moveable condition.
 */
static unsigned
mir_comparison_mobile(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                unsigned count,
                unsigned cond)
{
        if (!mir_single_use(ctx, cond))
                return ~0;

        unsigned ret = ~0;

        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->dest != cond)
                        continue;

                /* Must fit in an ALU bundle */
                if (instructions[i]->type != TAG_ALU_4)
                        return ~0;

                /* We'll need to rewrite to .w but that doesn't work for vector
                 * ops that don't replicate (ball/bany), so bail there */

                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->alu.op].props))
                        return ~0;

                /* Ensure it will fit with constants */

                if (!mir_adjust_constants(instructions[i], predicate, false))
                        return ~0;

                /* Ensure it is written only once */

                if (ret != ~0)
                        return ~0;
                else
                        ret = i;
        }

        /* Inject constants now that we are sure we want to */

        if (ret != ~0)
                mir_adjust_constants(instructions[ret], predicate, true);

        return ret;
}
/* Using the information about the moveable conditional itself, we either pop
 * that condition off the worklist for use now, or create a move to
 * artificially schedule instead as a fallback */

static midgard_instruction *
mir_schedule_comparison(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                unsigned cond, bool vector, unsigned *swizzle,
                midgard_instruction *user)
{
        /* TODO: swizzle when scheduling */
        unsigned comp_i =
                (!vector && (swizzle[0] == 0)) ?
                mir_comparison_mobile(ctx, instructions, predicate, count, cond) : ~0;

        /* If we can, schedule the condition immediately */
        if ((comp_i != ~0) && BITSET_TEST(worklist, comp_i)) {
                assert(comp_i < count);
                BITSET_CLEAR(worklist, comp_i);
                return instructions[comp_i];
        }

        /* Otherwise, we insert a move */

        midgard_instruction mov = v_mov(cond, cond);
        mov.mask = vector ? 0xF : 0x1;
        memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));

        return mir_insert_instruction_before(ctx, user, mov);
}
/* Most generally, we need instructions writing to r31 in the appropriate
 * component */

static midgard_instruction *
mir_schedule_condition(compiler_context *ctx,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions,
                midgard_instruction *last)
{
        /* For a branch, the condition is the only argument; for csel, third */
        bool branch = last->compact_branch;
        unsigned condition_index = branch ? 0 : 2;

        /* csel_v is vector; otherwise, conditions are scalar */
        bool vector = !branch && OP_IS_CSEL_V(last->alu.op);

        /* Grab the conditional instruction */

        midgard_instruction *cond = mir_schedule_comparison(
                        ctx, instructions, predicate, worklist, count, last->src[condition_index],
                        vector, last->swizzle[2], last);

        /* We have exclusive reign over this (possibly move) conditional
         * instruction. We can rewrite into a pipeline conditional register */

        predicate->exclude = cond->dest;
        cond->dest = SSA_FIXED_REGISTER(31);

        if (!vector) {
                cond->mask = (1 << COMPONENT_W);

                mir_foreach_src(cond, s) {
                        if (cond->src[s] == ~0)
                                continue;

                        for (unsigned q = 0; q < 4; ++q)
                                cond->swizzle[s][q + COMPONENT_W] = cond->swizzle[s][q];
                }
        }

        /* Schedule the unit: csel is always in the latter pipeline, so a csel
         * condition must be in the former pipeline stage (vmul/sadd),
         * depending on scalar/vector of the instruction itself. A branch must
         * be written from the latter pipeline stage and a branch condition is
         * always scalar, so it is always in smul (exception: ball/bany, which
         * are vadd) */

        if (branch)
                cond->unit = UNIT_SMUL;
        else
                cond->unit = vector ? UNIT_VMUL : UNIT_SADD;

        return cond;
}
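/* Illustration of the scalar rewrite above (hypothetical swizzle): a source
 * swizzle beginning { COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W }
 * has its first four lanes copied up by COMPONENT_W, so lane .w ends up
 * reading the original .x and the lone boolean lands in r31.w where the
 * branch or csel expects it. */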
/* Schedules a single bundle of the given type */

static midgard_bundle
mir_schedule_texture(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_predicate predicate = {
                .tag = TAG_TEXTURE_4,
                .destructive = true,
                .exclude = ~0
        };

        midgard_instruction *ins =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        mir_update_worklist(worklist, len, instructions, ins);

        struct midgard_bundle out = {
                .tag = TAG_TEXTURE_4,
                .instruction_count = 1,
                .instructions = { ins }
        };

        return out;
}
static midgard_bundle
mir_schedule_ldst(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_predicate predicate = {
                .tag = TAG_LOAD_STORE_4,
                .destructive = true,
                .exclude = ~0
        };

        /* Try to pick two load/store ops. Second not guaranteed to exist */

        midgard_instruction *ins =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        midgard_instruction *pair =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        struct midgard_bundle out = {
                .tag = TAG_LOAD_STORE_4,
                .instruction_count = pair ? 2 : 1,
                .instructions = { ins, pair }
        };

        /* We have to update the worklist atomically, since the two
         * instructions run concurrently (TODO: verify it's not pipelined) */

        mir_update_worklist(worklist, len, instructions, ins);
        mir_update_worklist(worklist, len, instructions, pair);

        return out;
}
static midgard_bundle
mir_schedule_alu(
                compiler_context *ctx,
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_bundle bundle = {};

        unsigned bytes_emitted = sizeof(bundle.control);

        struct midgard_predicate predicate = {
                .tag = TAG_ALU_4,
                .destructive = true,
                .exclude = ~0,
                .constants = (uint8_t *) bundle.constants
        };

        midgard_instruction *vmul = NULL;
        midgard_instruction *vadd = NULL;
        midgard_instruction *vlut = NULL;
        midgard_instruction *smul = NULL;
        midgard_instruction *sadd = NULL;
        midgard_instruction *branch = NULL;

        mir_choose_alu(&branch, instructions, worklist, len, &predicate, ALU_ENAB_BR_COMPACT);
        mir_update_worklist(worklist, len, instructions, branch);
        bool writeout = branch && branch->writeout;

        if (branch && !branch->prepacked_branch && branch->branch.conditional) {
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, branch);

                if (cond->unit == UNIT_VADD)
                        vadd = cond;
                else if (cond->unit == UNIT_SMUL)
                        smul = cond;
                else
                        unreachable("Bad condition");
        }

        mir_choose_alu(&smul, instructions, worklist, len, &predicate, UNIT_SMUL);

        if (!writeout)
                mir_choose_alu(&vlut, instructions, worklist, len, &predicate, UNIT_VLUT);

        mir_choose_alu(&vadd, instructions, worklist, len, &predicate, UNIT_VADD);

        mir_update_worklist(worklist, len, instructions, vlut);
        mir_update_worklist(worklist, len, instructions, vadd);
        mir_update_worklist(worklist, len, instructions, smul);

        bool vadd_csel = vadd && OP_IS_CSEL(vadd->alu.op);
        bool smul_csel = smul && OP_IS_CSEL(smul->alu.op);

        if (vadd_csel || smul_csel) {
                midgard_instruction *ins = vadd_csel ? vadd : smul;
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, ins);

                if (cond->unit == UNIT_VMUL)
                        vmul = cond;
                else if (cond->unit == UNIT_SADD)
                        sadd = cond;
                else
                        unreachable("Bad condition");
        }

        /* Stage 2, let's schedule sadd before vmul for writeout */
        mir_choose_alu(&sadd, instructions, worklist, len, &predicate, UNIT_SADD);

        /* Check if writeout reads its own register */
        bool bad_writeout = false;

        if (branch && branch->writeout) {
                midgard_instruction *stages[] = { sadd, vadd, smul };
                unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : branch->src[0];
                unsigned writeout_mask = 0x0;

                for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                        if (!stages[i])
                                continue;

                        if (stages[i]->dest != src)
                                continue;

                        writeout_mask |= stages[i]->mask;
                        bad_writeout |= mir_has_arg(stages[i], branch->src[0]);
                }

                /* It's possible we'll be able to schedule something into vmul
                 * to fill r0. Let's peek into the future, trying to schedule
                 * vmul specially that way. */

                if (!bad_writeout && writeout_mask != 0xF) {
                        predicate.unit = UNIT_VMUL;
                        predicate.dest = src;
                        predicate.mask = writeout_mask ^ 0xF;

                        struct midgard_instruction *peaked =
                                mir_choose_instruction(instructions, worklist, len, &predicate);

                        if (peaked) {
                                vmul = peaked;
                                vmul->unit = UNIT_VMUL;
                                writeout_mask |= predicate.mask;
                                assert(writeout_mask == 0xF);
                        }

                        /* Cleanup the predicate for the remaining slots */
                        predicate.dest = predicate.mask = 0;
                }

                /* Finally, add a move if necessary */
                if (bad_writeout || writeout_mask != 0xF) {
                        unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
                        midgard_instruction mov = v_mov(src, temp);
                        vmul = mem_dup(&mov, sizeof(midgard_instruction));
                        vmul->unit = UNIT_VMUL;
                        vmul->mask = 0xF ^ writeout_mask;
                        /* TODO: Don't leak */

                        /* Rewrite to use our temp */

                        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                                if (stages[i])
                                        mir_rewrite_index_dst_single(stages[i], src, temp);
                        }

                        mir_rewrite_index_src_single(branch, src, temp);
                }
        }

        mir_choose_alu(&vmul, instructions, worklist, len, &predicate, UNIT_VMUL);

        mir_update_worklist(worklist, len, instructions, vmul);
        mir_update_worklist(worklist, len, instructions, sadd);

        bundle.has_blend_constant = predicate.blend_constant;
        bundle.has_embedded_constants = predicate.constant_count > 0;

        unsigned padding = 0;

        /* Now that we have finished scheduling, build up the bundle */
        midgard_instruction *stages[] = { vmul, sadd, vadd, smul, vlut, branch };

        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                if (!stages[i]) continue;

                bundle.control |= stages[i]->unit;
                bytes_emitted += bytes_for_instruction(stages[i]);
                bundle.instructions[bundle.instruction_count++] = stages[i];
        }

        /* Pad ALU op to nearest word */

        if (bytes_emitted & 15) {
                padding = 16 - (bytes_emitted & 15);
                bytes_emitted += padding;
        }

        /* Constants must always be quadwords */
        if (bundle.has_embedded_constants)
                bytes_emitted += 16;

        /* Size ALU instruction for tag */
        bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
        bundle.padding = padding;
        bundle.control |= bundle.tag;

        return bundle;
}
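/* Sizing example for the tag computation above (illustrative numbers): if the
 * control word plus the scheduled ALU records come to 18 bytes, padding is
 * 16 - (18 & 15) = 14 so bytes_emitted rounds up to 32; an embedded-constant
 * quadword pushes that to 48, giving bundle.tag = TAG_ALU_4 + 48/16 - 1 =
 * TAG_ALU_4 + 2. */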
/* Schedule a single block by iterating its instructions to create bundles.
 * As we go, tally up the bundle sizes to compute the block size. */

static void
schedule_block(compiler_context *ctx, midgard_block *block)
{
        /* Copy list to dynamic array */
        unsigned len = 0;
        midgard_instruction **instructions = flatten_mir(block, &len);

        if (!len)
                return;

        /* Calculate dependencies and initial worklist */
        unsigned node_count = ctx->temp_count + 1;
        mir_create_dependency_graph(instructions, len, node_count);

        /* Allocate the worklist */
        size_t sz = BITSET_WORDS(len) * sizeof(BITSET_WORD);
        BITSET_WORD *worklist = calloc(sz, 1);
        mir_initialize_worklist(worklist, instructions, len);

        struct util_dynarray bundles;
        util_dynarray_init(&bundles, NULL);

        block->quadword_count = 0;
        unsigned blend_offset = 0;

        for (;;) {
                unsigned tag = mir_choose_bundle(instructions, worklist, len);
                midgard_bundle bundle;

                if (tag == TAG_TEXTURE_4)
                        bundle = mir_schedule_texture(instructions, worklist, len);
                else if (tag == TAG_LOAD_STORE_4)
                        bundle = mir_schedule_ldst(instructions, worklist, len);
                else if (tag == TAG_ALU_4)
                        bundle = mir_schedule_alu(ctx, instructions, worklist, len);
                else
                        break;

                util_dynarray_append(&bundles, midgard_bundle, bundle);

                if (bundle.has_blend_constant)
                        blend_offset = block->quadword_count;

                block->quadword_count += quadword_size(bundle.tag);
        }

        /* We emitted bundles backwards; copy into the block in reverse-order */

        util_dynarray_init(&block->bundles, NULL);
        util_dynarray_foreach_reverse(&bundles, midgard_bundle, bundle) {
                util_dynarray_append(&block->bundles, midgard_bundle, *bundle);
        }

        /* Blend constant was backwards as well. blend_offset if set is
         * strictly positive, as an offset of zero would imply constants before
         * any instructions which is invalid in Midgard. TODO: blend constants
         * are broken if you spill since then quadword_count becomes invalid */

        if (blend_offset)
                ctx->blend_constant_offset = ((ctx->quadword_count + block->quadword_count) - blend_offset - 1) * 0x10;

        block->is_scheduled = true;
        ctx->quadword_count += block->quadword_count;

        /* Reorder instructions to match the bundled order. First remove
         * existing instructions and then recreate the list */

        mir_foreach_instr_in_block_safe(block, ins) {
                list_del(&ins->link);
        }

        mir_foreach_instr_in_block_scheduled_rev(block, ins) {
                list_add(&ins->link, &block->instructions);
        }

        free(instructions); /* Allocated by flatten_mir() */
        free(worklist);
}
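/* Offset example for the blend constant handling above (hypothetical sizes):
 * if previous blocks total 10 quadwords, this block is 4 quadwords, and
 * blend_offset was recorded as 1, the constant lands at
 * ((10 + 4) - 1 - 1) * 0x10 = 0xC0 bytes into the shader. */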
/* When we're 'squeezing down' the values in the IR, we maintain a hash
 * as such */

static unsigned
find_or_allocate_temp(compiler_context *ctx, unsigned hash)
{
        if (hash >= SSA_FIXED_MINIMUM)
                return hash;

        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
                ctx->hash_to_temp, hash + 1);

        if (temp)
                return temp - 1;

        /* If no temp is found, allocate one */
        temp = ctx->temp_count++;
        ctx->max_hash = MAX2(ctx->max_hash, hash);

        _mesa_hash_table_u64_insert(ctx->hash_to_temp,
                hash + 1, (void *) ((uintptr_t) temp + 1));

        return temp;
}
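/* Example of the remapping (hypothetical indices): the +1/-1 dance lets a
 * stored mapping be told apart from a NULL lookup, and a shader whose live
 * indices are { 7, 12, 3 } squeezes to { 0, 1, 2 } in first-seen order, with
 * texture destinations visited first by mir_squeeze_index below. */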
/* Reassigns numbering to get rid of gaps in the indices and to prioritize
 * smaller register classes */

void
mir_squeeze_index(compiler_context *ctx)
{
        ctx->temp_count = 0;
        /* TODO don't leak old hash_to_temp */
        ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);

        /* We need to prioritize texture registers on older GPUs so we don't
         * fail RA trying to assign to work registers r0/r1 when a work
         * register is already there */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->type == TAG_TEXTURE_4)
                        ins->dest = find_or_allocate_temp(ctx, ins->dest);
        }

        mir_foreach_instr_global(ctx, ins) {
                if (ins->type != TAG_TEXTURE_4)
                        ins->dest = find_or_allocate_temp(ctx, ins->dest);

                for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i)
                        ins->src[i] = find_or_allocate_temp(ctx, ins->src[i]);
        }
}
static midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .dest = ~0,
                .src = { ~0, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY_4,
                .load_store = {
                        .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,

                        /* For register spilling - to thread local storage */
                },

                /* If we spill an unspill, RA goes into an infinite loop */
                .no_spill = true
        };

        ins.constants[0] = byte;

        if (is_store) {
                /* r0 = r26, r1 = r27 */
                assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
                ins.src[0] = srcdest;
        } else {
                ins.dest = srcdest;
        }

        return ins;
}
/* If register allocation fails, find the best spill node and spill it to fix
 * whatever the issue was. This spill node could be a work register (spilling
 * to thread local storage), but it could also simply be a special register
 * that needs to spill to become a work register. */

static void mir_spill_register(
                compiler_context *ctx,
                struct lcra_state *l,
                unsigned *spill_count)
{
        unsigned spill_index = ctx->temp_count;

        /* Our first step is to calculate spill cost to figure out the best
         * spill node. All nodes are equal in spill cost, but we can't spill
         * nodes written to from an unspill */

        unsigned *cost = calloc(ctx->temp_count, sizeof(cost[0]));

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < ctx->temp_count)
                        cost[ins->dest]++;

                mir_foreach_src(ins, s) {
                        if (ins->src[s] < ctx->temp_count)
                                cost[ins->src[s]]++;
                }
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i)
                lcra_set_node_spill_cost(l, i, cost[i]);

        free(cost);

        /* We can't spill any bundles that contain unspills. This could be
         * optimized to allow use of r27 to spill twice per bundle, but if
         * you're at the point of optimizing spilling, it's too late.
         *
         * We also can't double-spill. */

        mir_foreach_block(ctx, block) {
                mir_foreach_bundle_in_block(block, bun) {
                        bool no_spill = false;

                        for (unsigned i = 0; i < bun->instruction_count; ++i) {
                                no_spill |= bun->instructions[i]->no_spill;

                                if (bun->instructions[i]->no_spill) {
                                        mir_foreach_src(bun->instructions[i], s) {
                                                unsigned src = bun->instructions[i]->src[s];

                                                if (src < ctx->temp_count)
                                                        lcra_set_node_spill_cost(l, src, -1);
                                        }
                                }
                        }

                        if (!no_spill)
                                continue;

                        for (unsigned i = 0; i < bun->instruction_count; ++i) {
                                unsigned dest = bun->instructions[i]->dest;
                                if (dest < ctx->temp_count)
                                        lcra_set_node_spill_cost(l, dest, -1);
                        }
                }
        }

        int spill_node = lcra_get_best_spill_node(l);

        if (spill_node < 0) {
                mir_print_shader(ctx);
                assert(0);
        }

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = l->class[spill_node] != REG_CLASS_WORK;
        bool is_special_w = l->class[spill_node] == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, block) {
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill = true;
                                } else {
                                        ins->dest = SSA_FIXED_REGISTER(26);
                                        ins->no_spill = true;
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */

        mir_foreach_block(ctx, block) {
                bool consecutive_skip = false;
                unsigned consecutive_index = 0;

                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        if (!mir_has_arg(ins, spill_node)) {
                                consecutive_skip = false;
                                continue;
                        }

                        if (consecutive_skip) {
                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
                                continue;
                        }

                        if (!is_special_w) {
                                consecutive_index = ++spill_index;

                                midgard_instruction *before = ins;

                                /* TODO: Remove me I'm a fossil */
                                if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
                                        before = mir_prev_op(before);

                                midgard_instruction st;

                                if (is_special) {
                                        /* Move for a special read */
                                        st = v_mov(spill_node, consecutive_index);
                                        st.no_spill = true;
                                } else {
                                        /* TLS load */
                                        st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(read_bytemask, midgard_reg_mode_32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                                // consecutive_skip = true;
                        } else {
                                /* Special writes already have their move spilled in */
                                consecutive_index = spill_slot;
                        }

                        /* Rewrite to use */
                        mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
                }
        }

        /* Clear the rewrite hints now that this spill is finished */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}
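/* Illustrative spill flow (hypothetical node and slot numbers): spilling work
 * node 18 to TLS slot 2 rewrites each instruction writing node 18 to write
 * r26 instead, follows it with a scratch store at byte offset 2 * 16 = 32,
 * and before each later run of readers inserts a scratch load from the same
 * offset into a fresh index that those readers are rewritten to use. */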
void
schedule_program(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        midgard_promote_uniforms(ctx, 16);

        /* Must be lowered right before RA */
        mir_squeeze_index(ctx);
        mir_lower_special_reads(ctx);
        mir_squeeze_index(ctx);

        /* Lowering can introduce some dead moves */

        mir_foreach_block(ctx, block) {
                midgard_opt_dead_move_eliminate(ctx, block);
                schedule_block(ctx, block);
        }

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled)
                        mir_spill_register(ctx, l, &spill_count);

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                l = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);
}