/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"
#include "util/u_memory.h"

/* Scheduling for Midgard is complicated, to say the least. ALU instructions
 * must be grouped into VLIW bundles according to the following model:
 *
 * [VMUL] [SADD]
 * [VADD] [SMUL] [VLUT]
 *
 * A given instruction can execute on some subset of the units (or a few can
 * execute on all). Instructions can be either vector or scalar; only scalar
 * instructions can execute on SADD/SMUL units. Units on a given line execute
 * in parallel. Subsequent lines execute separately and can pass results
 * directly via pipeline registers r24/r25, bypassing the register file.
 *
 * A bundle can optionally have 128-bits of embedded constants, shared across
 * all of the instructions within a bundle.
 *
 * Instructions consuming conditionals (branches and conditional selects)
 * require their condition to be written into the conditional register (r31)
 * within the same bundle they are consumed.
 *
 * Fragment writeout requires its argument to be written in full within the
 * same bundle as the branch, with no hanging dependencies.
 *
 * Load/store instructions are also in bundles of simply two instructions, and
 * texture instructions have no bundling.
 *
 * -------------------------------------------------------------------------
 */
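
/* As an illustration of the model above (not tied to any particular shader),
 * a vector multiply placed on VMUL in the first line can feed a dependent
 * vector add placed on VADD in the second line of the same bundle, with the
 * intermediate value carried in a pipeline register instead of a named
 * register. */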

/* We create the dependency graph with per-byte granularity */
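
/* (Byte granularity means that accesses touching disjoint bytes of the same
 * node, e.g. a partial write of some components alongside a read of others,
 * do not create false dependencies between instructions.) */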

#define BYTE_COUNT 16

static void
add_dependency(struct util_dynarray *table, unsigned index, uint16_t mask,
               midgard_instruction **instructions, unsigned child)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                struct util_dynarray *parents = &table[(BYTE_COUNT * index) + i];

                util_dynarray_foreach(parents, unsigned, parent) {
                        BITSET_WORD *dependents = instructions[*parent]->dependents;

                        /* Already have the dependency */
                        if (BITSET_TEST(dependents, child))
                                continue;

                        BITSET_SET(dependents, child);
                        instructions[child]->nr_dependencies++;
                }
        }
}

static void
mark_access(struct util_dynarray *table, unsigned index, uint16_t mask,
            unsigned parent)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                util_dynarray_append(&table[(BYTE_COUNT * index) + i], unsigned, parent);
        }
}

static void
mir_create_dependency_graph(midgard_instruction **instructions, unsigned count, unsigned node_count)
{
        size_t sz = node_count * BYTE_COUNT;

        struct util_dynarray *last_read = calloc(sizeof(struct util_dynarray), sz);
        struct util_dynarray *last_write = calloc(sizeof(struct util_dynarray), sz);

        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_init(&last_read[i], NULL);
                util_dynarray_init(&last_write[i], NULL);
        }

        /* Initialize dependency graph */
        for (unsigned i = 0; i < count; ++i) {
                instructions[i]->dependents =
                        calloc(BITSET_WORDS(count), sizeof(BITSET_WORD));

                instructions[i]->nr_dependencies = 0;
        }

        /* Populate dependency graph */
        for (signed i = count - 1; i >= 0; --i) {
                if (instructions[i]->compact_branch)
                        continue;

                unsigned dest = instructions[i]->dest;
                unsigned mask = mir_bytemask(instructions[i]);

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                add_dependency(last_write, src, readmask, instructions, i);
                        }
                }

                if (dest < node_count) {
                        add_dependency(last_read, dest, mask, instructions, i);
                        add_dependency(last_write, dest, mask, instructions, i);
                        mark_access(last_write, dest, mask, i);
                }

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                mark_access(last_read, src, readmask, i);
                        }
                }
        }

        /* If there is a branch, all instructions depend on it, as interblock
         * execution must be purely in-order */

        if (instructions[count - 1]->compact_branch) {
                BITSET_WORD *dependents = instructions[count - 1]->dependents;

                for (signed i = count - 2; i >= 0; --i) {
                        if (BITSET_TEST(dependents, i))
                                continue;

                        BITSET_SET(dependents, i);
                        instructions[i]->nr_dependencies++;
                }
        }

        /* Free the intermediate structures */
        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_fini(&last_read[i]);
                util_dynarray_fini(&last_write[i]);
        }

        free(last_read);
        free(last_write);
}

/* Does the mask cover more than a scalar? */

static bool
is_single_component_mask(unsigned mask)
{
        int components = 0;

        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        components++;
        }

        return components == 1;
}

/* Helpers for scheduling */

static bool
mir_is_scalar(midgard_instruction *ains)
{
        /* Do we try to use it as a vector op? */
        if (!is_single_component_mask(ains->mask))
                return false;

        /* Otherwise, check mode hazards */
        bool could_scalar = true;

        /* Only 16/32-bit can run on a scalar unit */
        could_scalar &= ains->alu.reg_mode != midgard_reg_mode_8;
        could_scalar &= ains->alu.reg_mode != midgard_reg_mode_64;
        could_scalar &= ains->alu.dest_override == midgard_dest_override_none;

        if (ains->alu.reg_mode == midgard_reg_mode_16) {
                /* If we're running in 16-bit mode, we
                 * can't have any 8-bit sources on the
                 * scalar unit (since the scalar unit
                 * doesn't understand 8-bit) */

                midgard_vector_alu_src s1 =
                        vector_alu_from_unsigned(ains->alu.src1);

                could_scalar &= !s1.half;

                midgard_vector_alu_src s2 =
                        vector_alu_from_unsigned(ains->alu.src2);

                could_scalar &= !s2.half;
        }

        return could_scalar;
}

/* How many bytes does this ALU instruction add to the bundle? */

static unsigned
bytes_for_instruction(midgard_instruction *ains)
{
        if (ains->unit & UNITS_ANY_VECTOR)
                return sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
        else if (ains->unit == ALU_ENAB_BRANCH)
                return sizeof(midgard_branch_extended);
        else if (ains->compact_branch)
                return sizeof(ains->br_compact);
        else
                return sizeof(midgard_reg_info) + sizeof(midgard_scalar_alu);
}

/* We would like to flatten the linked list of midgard_instructions in a bundle
 * to an array of pointers on the heap for easy indexing */

static midgard_instruction **
flatten_mir(midgard_block *block, unsigned *len)
{
        *len = list_length(&block->instructions);

        if (!(*len))
                return NULL;

        midgard_instruction **instructions =
                calloc(sizeof(midgard_instruction *), *len);

        unsigned i = 0;

        mir_foreach_instr_in_block(block, ins)
                instructions[i++] = ins;

        return instructions;
}

/* The worklist is the set of instructions that can be scheduled now; that is,
 * the set of instructions with no remaining dependencies */

static void
mir_initialize_worklist(BITSET_WORD *worklist, midgard_instruction **instructions, unsigned count)
{
        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->nr_dependencies == 0)
                        BITSET_SET(worklist, i);
        }
}

/* Update the worklist after an instruction terminates. Remove its edges from
 * the graph and if that causes any node to have no dependencies, add it to the
 * worklist */

static void
mir_update_worklist(
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions, midgard_instruction *done)
{
        /* Sanity check: if no instruction terminated, there is nothing to do.
         * If the instruction that terminated had dependencies, that makes no
         * sense and means we messed up the worklist. Finally, as the purpose
         * of this routine is to update dependents, we abort early if there are
         * no dependents defined. */

        if (!done)
                return;

        assert(done->nr_dependencies == 0);

        if (!done->dependents)
                return;

        /* We have an instruction with dependents. Iterate each dependent to
         * remove one dependency (`done`), adding dependents to the worklist
         * where possible. */

        unsigned i;
        BITSET_WORD tmp;

        BITSET_FOREACH_SET(i, tmp, done->dependents, count) {
                assert(instructions[i]->nr_dependencies);

                if (!(--instructions[i]->nr_dependencies))
                        BITSET_SET(worklist, i);
        }

        free(done->dependents);
}

/* While scheduling, we need to choose instructions satisfying certain
 * criteria. As we schedule backwards, we choose the *last* instruction in the
 * worklist to simulate in-order scheduling. Chosen instructions must satisfy a
 * given predicate. */

struct midgard_predicate {
        /* TAG or ~0 for dont-care */
        unsigned tag;

        /* True if we want to pop off the chosen instruction */
        bool destructive;

        /* For ALU, choose only this unit */
        unsigned unit;

        /* State for bundle constants. constants is the actual constants
         * for the bundle. constant_count is the number of bytes (up to
         * 16) currently in use for constants. When picking in destructive
         * mode, the constants array will be updated, and the instruction
         * will be adjusted to index into the constants array */

        midgard_constants *constants;
        unsigned constant_count;
        bool blend_constant;

        /* Exclude this destination (if not ~0) */
        unsigned exclude;

        /* Don't schedule instructions consuming conditionals (since we already
         * scheduled one). Excludes conditional branches and csel */
        bool no_cond;

        /* Require a minimal mask and (if nonzero) given destination. Used for
         * writeout optimizations */
        unsigned mask;
        unsigned dest;
};
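
/* For instance, mir_choose_bundle below probes with a non-destructive
 * predicate (tag = ~0, destructive = false) just to learn which bundle type to
 * emit next, while the per-unit helpers pick destructively, popping the chosen
 * instruction off the worklist and folding its constants into the bundle. */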

/* For an instruction that can fit, adjust it to fit and update the constants
 * array, in destructive mode. Returns whether the fitting was successful. */
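
/* As an example of the packing below: if the bundle already holds the 32-bit
 * constant 0x3f800000 (1.0f) and a candidate instruction also reads
 * 0x3f800000, the two share a single slot and the instruction's swizzle is
 * composed to point at it; genuinely new values are appended as long as the
 * 16 bytes of embedded constants last. */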

static bool
mir_adjust_constants(midgard_instruction *ins,
                struct midgard_predicate *pred,
                bool destructive)
{
        /* Blend constants dominate */
        if (ins->has_blend_constant) {
                if (pred->constant_count)
                        return false;
                else if (destructive) {
                        pred->blend_constant = true;
                        pred->constant_count = 16;
                }

                return true;
        }

        /* No constant, nothing to adjust */
        if (!ins->has_constants)
                return true;

        if (ins->alu.reg_mode != midgard_reg_mode_32) {
                /* TODO: 16-bit constant combining */
                if (pred->constant_count)
                        return false;

                uint16_t *bundles = pred->constants->u16;
                const uint16_t *constants = ins->constants.u16;

                /* Copy them wholesale */
                for (unsigned i = 0; i < 4; ++i)
                        bundles[i] = constants[i];

                pred->constant_count = 16;
        } else {
                /* Pack 32-bit constants */
                uint32_t *bundles = pred->constants->u32;
                const uint32_t *constants = ins->constants.u32;
                unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                unsigned mask = mir_from_bytemask(mir_bytemask_of_read_components(ins, r_constant), midgard_reg_mode_32);

                /* First, check if it fits */
                unsigned count = DIV_ROUND_UP(pred->constant_count, sizeof(uint32_t));
                unsigned existing_count = count;

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                continue;

                        bool ok = false;

                        /* Look for existing constant */
                        for (unsigned j = 0; j < existing_count; ++j) {
                                if (bundles[j] == constants[i]) {
                                        ok = true;
                                        break;
                                }
                        }

                        if (ok)
                                continue;

                        /* If the constant is new, check ourselves */
                        for (unsigned j = 0; j < i; ++j) {
                                if (constants[j] == constants[i] && (mask & (1 << j))) {
                                        ok = true;
                                        break;
                                }
                        }

                        if (ok)
                                continue;

                        /* Otherwise, this is a new constant */
                        count++;
                }

                /* Check if we have space */
                if (count > 4)
                        return false;

                /* If non-destructive, we're done */
                if (!destructive)
                        return true;

                /* If destructive, let's copy in the new constants and adjust
                 * swizzles to pack it in. */

                unsigned indices[16] = { 0 };

                count = existing_count;

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                continue;

                        uint32_t cons = constants[i];
                        bool constant_found = false;

                        /* Search for the constant */
                        for (unsigned j = 0; j < count; ++j) {
                                if (bundles[j] != cons)
                                        continue;

                                /* We found it, reuse */
                                indices[i] = j;
                                constant_found = true;
                                break;
                        }

                        if (constant_found)
                                continue;

                        /* We didn't find it, so allocate it */
                        unsigned idx = count++;

                        /* We have space, copy it in! */
                        indices[i] = idx;
                        bundles[idx] = cons;
                }

                pred->constant_count = count * sizeof(uint32_t);

                /* Use indices as a swizzle */

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == r_constant)
                                mir_compose_swizzle(ins->swizzle[s], indices, ins->swizzle[s]);
                }
        }

        return true;
}

static midgard_instruction *
mir_choose_instruction(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned count,
                struct midgard_predicate *predicate)
{
        /* Parse the predicate */
        unsigned tag = predicate->tag;
        bool alu = tag == TAG_ALU_4;
        unsigned unit = predicate->unit;
        bool branch = alu && (unit == ALU_ENAB_BR_COMPACT);
        bool scalar = (unit != ~0) && (unit & UNITS_SCALAR);
        bool no_cond = predicate->no_cond;

        unsigned mask = predicate->mask;
        unsigned dest = predicate->dest;
        bool needs_dest = mask & 0xF;

        /* Iterate to find the best instruction satisfying the predicate */

        unsigned i;
        BITSET_WORD tmp;

        signed best_index = -1;
        bool best_conditional = false;

        /* Enforce a simple metric limiting distance to keep down register
         * pressure. TODO: replace with liveness tracking for much better
         * results */

        unsigned max_active = 0;
        unsigned max_distance = 6;
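
        /* (With max_distance = 6, a ready instruction lagging more than six
         * worklist indices behind the newest ready instruction is skipped this
         * round; a crude way to bound how long values stay live.) */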

        BITSET_FOREACH_SET(i, tmp, worklist, count) {
                max_active = MAX2(max_active, i);
        }

        BITSET_FOREACH_SET(i, tmp, worklist, count) {
                if ((max_active - i) >= max_distance)
                        continue;

                if (tag != ~0 && instructions[i]->type != tag)
                        continue;

                if (predicate->exclude != ~0 && instructions[i]->dest == predicate->exclude)
                        continue;

                if (alu && !branch && !(alu_opcode_props[instructions[i]->alu.op].props & unit))
                        continue;

                if (branch && !instructions[i]->compact_branch)
                        continue;

                if (alu && scalar && !mir_is_scalar(instructions[i]))
                        continue;

                if (alu && !mir_adjust_constants(instructions[i], predicate, false))
                        continue;

                if (needs_dest && instructions[i]->dest != dest)
                        continue;

                if (mask && ((~instructions[i]->mask) & mask))
                        continue;

                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->alu.op);
                conditional |= (branch && instructions[i]->branch.conditional);

                if (conditional && no_cond)
                        continue;

                /* Simulate in-order scheduling */
                if ((signed) i < best_index)
                        continue;

                best_index = i;
                best_conditional = conditional;
        }

        /* Did we find anything? */

        if (best_index < 0)
                return NULL;

        /* If we found something, remove it from the worklist */
        assert(best_index < count);

        if (predicate->destructive) {
                BITSET_CLEAR(worklist, best_index);

                if (alu)
                        mir_adjust_constants(instructions[best_index], predicate, true);

                /* Once we schedule a conditional, we can't again */
                predicate->no_cond |= best_conditional;
        }

        return instructions[best_index];
}

/* Still, we don't choose instructions in a vacuum. We need a way to choose the
 * best bundle type (ALU, load/store, texture). Nondestructive. */

static unsigned
mir_choose_bundle(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned count)
{
        /* At the moment, our algorithm is very simple - use the bundle of the
         * best instruction, regardless of what else could be scheduled
         * alongside it. This is not optimal but it works okay for in-order */

        struct midgard_predicate predicate = {
                .tag = ~0,
                .destructive = false,
                .exclude = ~0
        };

        midgard_instruction *chosen = mir_choose_instruction(instructions, worklist, count, &predicate);

        if (chosen)
                return chosen->type;
        else
                return ~0;
}

/* We want to choose an ALU instruction filling a given unit */

static void
mir_choose_alu(midgard_instruction **slot,
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len,
                struct midgard_predicate *predicate,
                unsigned unit)
{
        /* Did we already schedule to this slot? */
        if ((*slot) != NULL)
                return;

        /* Try to schedule something, if not */
        predicate->unit = unit;
        *slot = mir_choose_instruction(instructions, worklist, len, predicate);

        /* Store unit upon scheduling */
        if (*slot && !((*slot)->compact_branch))
                (*slot)->unit = unit;
}

/* When we are scheduling a branch/csel, we need the consumed condition in the
 * same block as a pipeline register. There are two options to enable this:
 *
 *  - Move the conditional into the bundle. Preferred, but only works if the
 *    conditional is used only once and is from this block.
 *  - Copy the conditional.
 *
 * We search for the conditional. If it's in this block, single-use, and
 * without embedded constants, we schedule it immediately. Otherwise, we
 * schedule a move for it.
 *
 * mir_comparison_mobile is a helper to find the moveable condition.
 */
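
/* Concretely: a comparison defined in this block and consumed exactly once by
 * the branch/csel can be pulled straight into its bundle; a condition defined
 * elsewhere, reused, or otherwise immobile instead gets a scalar mov of its
 * value scheduled in its place. Either way, the chosen instruction is then
 * rewritten to land in r31 (see mir_schedule_condition below). */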

static unsigned
mir_comparison_mobile(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                unsigned count,
                unsigned cond)
{
        if (!mir_single_use(ctx, cond))
                return ~0;

        unsigned ret = ~0;

        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->dest != cond)
                        continue;

                /* Must fit in an ALU bundle */
                if (instructions[i]->type != TAG_ALU_4)
                        return ~0;

                /* If it would itself require a condition, that's recursive */
                if (OP_IS_CSEL(instructions[i]->alu.op))
                        return ~0;

                /* We'll need to rewrite to .w but that doesn't work for vector
                 * ops that don't replicate (ball/bany), so bail there */

                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->alu.op].props))
                        return ~0;

                /* Ensure it will fit with constants */

                if (!mir_adjust_constants(instructions[i], predicate, false))
                        return ~0;

                /* Ensure it is written only once */

                if (ret != ~0)
                        return ~0;
                else
                        ret = i;
        }

        /* Inject constants now that we are sure we want to */

        if (ret != ~0)
                mir_adjust_constants(instructions[ret], predicate, true);

        return ret;
}

/* Using the information about the moveable conditional itself, we either pop
 * that condition off the worklist for use now, or create a move to
 * artificially schedule instead as a fallback */

static midgard_instruction *
mir_schedule_comparison(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                unsigned cond, bool vector, unsigned *swizzle,
                midgard_instruction *user)
{
        /* TODO: swizzle when scheduling */
        unsigned comp_i =
                (!vector && (swizzle[0] == 0)) ?
                mir_comparison_mobile(ctx, instructions, predicate, count, cond) : ~0;

        /* If we can, schedule the condition immediately */
        if ((comp_i != ~0) && BITSET_TEST(worklist, comp_i)) {
                assert(comp_i < count);
                BITSET_CLEAR(worklist, comp_i);
                return instructions[comp_i];
        }

        /* Otherwise, we insert a move */

        midgard_instruction mov = v_mov(cond, cond);
        mov.mask = vector ? 0xF : 0x1;
        memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));

        return mir_insert_instruction_before(ctx, user, mov);
}

/* Most generally, we need instructions writing to r31 in the appropriate
 * component */

static midgard_instruction *
mir_schedule_condition(compiler_context *ctx,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions,
                midgard_instruction *last)
{
        /* For a branch, the condition is the only argument; for csel, third */
        bool branch = last->compact_branch;
        unsigned condition_index = branch ? 0 : 2;

        /* csel_v is vector; otherwise, conditions are scalar */
        bool vector = !branch && OP_IS_CSEL_V(last->alu.op);

        /* Grab the conditional instruction */

        midgard_instruction *cond = mir_schedule_comparison(
                        ctx, instructions, predicate, worklist, count, last->src[condition_index],
                        vector, last->swizzle[2], last);

        /* We have exclusive reign over this (possibly move) conditional
         * instruction. We can rewrite into a pipeline conditional register */

        predicate->exclude = cond->dest;
        cond->dest = SSA_FIXED_REGISTER(31);

        if (!vector) {
                cond->mask = (1 << COMPONENT_W);

                mir_foreach_src(cond, s) {
                        if (cond->src[s] == ~0)
                                continue;

                        for (unsigned q = 0; q < 4; ++q)
                                cond->swizzle[s][q + COMPONENT_W] = cond->swizzle[s][q];
                }
        }

        /* Schedule the unit: csel is always in the latter pipeline, so a csel
         * condition must be in the former pipeline stage (vmul/sadd),
         * depending on scalar/vector of the instruction itself. A branch must
         * be written from the latter pipeline stage and a branch condition is
         * always scalar, so it is always in smul (exception: ball/bany, which
         * will be vadd) */

        if (branch)
                cond->unit = UNIT_SMUL;
        else
                cond->unit = vector ? UNIT_VMUL : UNIT_SADD;

        return cond;
}

/* Schedules a single bundle of the given type */

static midgard_bundle
mir_schedule_texture(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_predicate predicate = {
                .tag = TAG_TEXTURE_4,
                .destructive = true,
                .exclude = ~0
        };

        midgard_instruction *ins =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        mir_update_worklist(worklist, len, instructions, ins);

        struct midgard_bundle out = {
                .tag = TAG_TEXTURE_4,
                .instruction_count = 1,
                .instructions = { ins }
        };

        return out;
}

static midgard_bundle
mir_schedule_ldst(
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_predicate predicate = {
                .tag = TAG_LOAD_STORE_4,
                .destructive = true,
                .exclude = ~0
        };

        /* Try to pick two load/store ops. Second not guaranteed to exist */

        midgard_instruction *ins =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        midgard_instruction *pair =
                mir_choose_instruction(instructions, worklist, len, &predicate);

        struct midgard_bundle out = {
                .tag = TAG_LOAD_STORE_4,
                .instruction_count = pair ? 2 : 1,
                .instructions = { ins, pair }
        };

        /* We have to update the worklist atomically, since the two
         * instructions run concurrently (TODO: verify it's not pipelined) */

        mir_update_worklist(worklist, len, instructions, ins);
        mir_update_worklist(worklist, len, instructions, pair);

        return out;
}

static midgard_bundle
mir_schedule_alu(
                compiler_context *ctx,
                midgard_instruction **instructions,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_bundle bundle = {};

        unsigned bytes_emitted = sizeof(bundle.control);

        struct midgard_predicate predicate = {
                .tag = TAG_ALU_4,
                .destructive = true,
                .exclude = ~0,
                .constants = &bundle.constants
        };

        midgard_instruction *vmul = NULL;
        midgard_instruction *vadd = NULL;
        midgard_instruction *vlut = NULL;
        midgard_instruction *smul = NULL;
        midgard_instruction *sadd = NULL;
        midgard_instruction *branch = NULL;

        mir_choose_alu(&branch, instructions, worklist, len, &predicate, ALU_ENAB_BR_COMPACT);
        mir_update_worklist(worklist, len, instructions, branch);
        bool writeout = branch && branch->writeout;

        if (branch && branch->branch.conditional) {
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, branch);

                if (cond->unit == UNIT_VADD)
                        vadd = cond;
                else if (cond->unit == UNIT_SMUL)
                        smul = cond;
                else
                        unreachable("Bad condition");
        }

        mir_choose_alu(&smul, instructions, worklist, len, &predicate, UNIT_SMUL);

        if (!writeout)
                mir_choose_alu(&vlut, instructions, worklist, len, &predicate, UNIT_VLUT);

        if (writeout) {
                /* Propagate up */
                bundle.last_writeout = branch->last_writeout;

                midgard_instruction add = v_mov(~0, make_compiler_temp(ctx));

                if (!ctx->is_blend) {
                        add.alu.op = midgard_alu_op_iadd;
                        add.src[0] = SSA_FIXED_REGISTER(31);

                        for (unsigned c = 0; c < 16; ++c)
                                add.swizzle[0][c] = COMPONENT_X;

                        add.has_inline_constant = true;
                        add.inline_constant = 0;
                } else {
                        add.src[1] = SSA_FIXED_REGISTER(1);

                        for (unsigned c = 0; c < 16; ++c)
                                add.swizzle[1][c] = COMPONENT_W;
                }

                vadd = mem_dup(&add, sizeof(midgard_instruction));

                vadd->unit = UNIT_VADD;

                branch->src[2] = add.dest;
        }

        mir_choose_alu(&vadd, instructions, worklist, len, &predicate, UNIT_VADD);

        mir_update_worklist(worklist, len, instructions, vlut);
        mir_update_worklist(worklist, len, instructions, vadd);
        mir_update_worklist(worklist, len, instructions, smul);

        bool vadd_csel = vadd && OP_IS_CSEL(vadd->alu.op);
        bool smul_csel = smul && OP_IS_CSEL(smul->alu.op);

        if (vadd_csel || smul_csel) {
                midgard_instruction *ins = vadd_csel ? vadd : smul;
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, ins);

                if (cond->unit == UNIT_VMUL)
                        vmul = cond;
                else if (cond->unit == UNIT_SADD)
                        sadd = cond;
                else
                        unreachable("Bad condition");
        }

        /* If we have a render target reference, schedule a move for it */

        if (branch && branch->writeout && (branch->constants.u32[0] || ctx->is_blend)) {
                midgard_instruction mov = v_mov(~0, make_compiler_temp(ctx));
                sadd = mem_dup(&mov, sizeof(midgard_instruction));
                sadd->unit = UNIT_SADD;

                sadd->has_inline_constant = true;
                sadd->inline_constant = branch->constants.u32[0];
                branch->src[1] = mov.dest;
                /* TODO: Don't leak */
        }

        /* Stage 2, let's schedule sadd before vmul for writeout */
        mir_choose_alu(&sadd, instructions, worklist, len, &predicate, UNIT_SADD);

        /* Check if writeout reads its own register */

        if (branch && branch->writeout) {
                midgard_instruction *stages[] = { sadd, vadd, smul };
                unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : branch->src[0];
                unsigned writeout_mask = 0x0;
                bool bad_writeout = false;

                for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                        if (!stages[i])
                                continue;

                        if (stages[i]->dest != src)
                                continue;

                        writeout_mask |= stages[i]->mask;
                        bad_writeout |= mir_has_arg(stages[i], branch->src[0]);
                }

                /* It's possible we'll be able to schedule something into vmul
                 * to fill r0. Let's peek into the future, trying to schedule
                 * vmul specially that way. */

                if (!bad_writeout && writeout_mask != 0xF) {
                        predicate.unit = UNIT_VMUL;
                        predicate.dest = src;
                        predicate.mask = writeout_mask ^ 0xF;

                        struct midgard_instruction *peaked =
                                mir_choose_instruction(instructions, worklist, len, &predicate);

                        if (peaked) {
                                vmul = peaked;
                                vmul->unit = UNIT_VMUL;
                                writeout_mask |= predicate.mask;
                                assert(writeout_mask == 0xF);
                        }

                        /* Reset the writeout-specific constraints */
                        predicate.dest = predicate.mask = 0;
                }

                /* Finally, add a move if necessary */
                if (bad_writeout || writeout_mask != 0xF) {
                        unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
                        midgard_instruction mov = v_mov(src, temp);
                        vmul = mem_dup(&mov, sizeof(midgard_instruction));
                        vmul->unit = UNIT_VMUL;
                        vmul->mask = 0xF ^ writeout_mask;
                        /* TODO: Don't leak */

                        /* Rewrite to use our temp */

                        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                                if (stages[i])
                                        mir_rewrite_index_dst_single(stages[i], src, temp);
                        }

                        mir_rewrite_index_src_single(branch, src, temp);
                }
        }

        mir_choose_alu(&vmul, instructions, worklist, len, &predicate, UNIT_VMUL);

        mir_update_worklist(worklist, len, instructions, vmul);
        mir_update_worklist(worklist, len, instructions, sadd);

        bundle.has_blend_constant = predicate.blend_constant;
        bundle.has_embedded_constants = predicate.constant_count > 0;

        unsigned padding = 0;

        /* Now that we have finished scheduling, build up the bundle */
        midgard_instruction *stages[] = { vmul, sadd, vadd, smul, vlut, branch };

        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                if (stages[i]) {
                        bundle.control |= stages[i]->unit;
                        bytes_emitted += bytes_for_instruction(stages[i]);
                        bundle.instructions[bundle.instruction_count++] = stages[i];
                }
        }

        /* Pad ALU op to nearest word */

        if (bytes_emitted & 15) {
                padding = 16 - (bytes_emitted & 15);
                bytes_emitted += padding;
        }

        /* Constants must always be quadwords */
        if (bundle.has_embedded_constants)
                bytes_emitted += 16;

        /* Size ALU instruction for tag */
        bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;

        /* MRT capable GPUs use a special writeout procedure */
        if (writeout && !(ctx->quirks & MIDGARD_NO_UPPER_ALU))
                bundle.tag += 4;

        bundle.padding = padding;
        bundle.control |= bundle.tag;

        return bundle;
}

/* Schedule a single block by iterating its instructions to create bundles.
 * While we go, tally up the bundle sizes to compute the block size. */

static void
schedule_block(compiler_context *ctx, midgard_block *block)
{
        /* Copy list to dynamic array */
        unsigned len = 0;
        midgard_instruction **instructions = flatten_mir(block, &len);

        if (!len)
                return;

        /* Calculate dependencies and initial worklist */
        unsigned node_count = ctx->temp_count + 1;
        mir_create_dependency_graph(instructions, len, node_count);

        /* Allocate the worklist */
        size_t sz = BITSET_WORDS(len) * sizeof(BITSET_WORD);
        BITSET_WORD *worklist = calloc(sz, 1);
        mir_initialize_worklist(worklist, instructions, len);

        struct util_dynarray bundles;
        util_dynarray_init(&bundles, NULL);

        block->quadword_count = 0;
        unsigned blend_offset = 0;

        for (;;) {
                unsigned tag = mir_choose_bundle(instructions, worklist, len);
                midgard_bundle bundle;

                if (tag == TAG_TEXTURE_4)
                        bundle = mir_schedule_texture(instructions, worklist, len);
                else if (tag == TAG_LOAD_STORE_4)
                        bundle = mir_schedule_ldst(instructions, worklist, len);
                else if (tag == TAG_ALU_4)
                        bundle = mir_schedule_alu(ctx, instructions, worklist, len);
                else
                        break;

                util_dynarray_append(&bundles, midgard_bundle, bundle);

                if (bundle.has_blend_constant)
                        blend_offset = block->quadword_count;

                block->quadword_count += midgard_word_size[bundle.tag];
        }

        /* We emitted bundles backwards; copy into the block in reverse-order */

        util_dynarray_init(&block->bundles, NULL);
        util_dynarray_foreach_reverse(&bundles, midgard_bundle, bundle) {
                util_dynarray_append(&block->bundles, midgard_bundle, *bundle);
        }

        /* Blend constant was backwards as well. blend_offset if set is
         * strictly positive, as an offset of zero would imply constants before
         * any instructions which is invalid in Midgard. TODO: blend constants
         * are broken if you spill since then quadword_count becomes invalid
         * XXX */

        if (blend_offset)
                ctx->blend_constant_offset = ((ctx->quadword_count + block->quadword_count) - blend_offset - 1) * 0x10;

        block->is_scheduled = true;
        ctx->quadword_count += block->quadword_count;

        /* Reorder instructions to match bundled. First remove existing
         * instructions and then recreate the list */

        mir_foreach_instr_in_block_safe(block, ins) {
                list_del(&ins->link);
        }

        mir_foreach_instr_in_block_scheduled_rev(block, ins) {
                list_add(&ins->link, &block->instructions);
        }

        free(instructions); /* Allocated by flatten_mir() */
        free(worklist);
}

void
midgard_schedule_program(compiler_context *ctx)
{
        midgard_promote_uniforms(ctx);

        /* Must be lowered right before scheduling */
        mir_squeeze_index(ctx);
        mir_lower_special_reads(ctx);
        mir_squeeze_index(ctx);

        /* Lowering can introduce some dead moves */

        mir_foreach_block(ctx, block) {
                midgard_opt_dead_move_eliminate(ctx, block);
                schedule_block(ctx, block);
        }
}