/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/half_float.h"
/* Scheduling for Midgard is complicated, to say the least. ALU instructions
 * must be grouped into VLIW bundles according to the following model:
 *
 * [VMUL] [SADD]
 * [VADD] [SMUL] [VLUT]
 *
 * A given instruction can execute on some subset of the units (or a few can
 * execute on all). Instructions can be either vector or scalar; only scalar
 * instructions can execute on SADD/SMUL units. Units on a given line execute
 * in parallel. Subsequent lines execute separately and can pass results
 * directly via pipeline registers r24/r25, bypassing the register file.
 *
 * A bundle can optionally have 128-bits of embedded constants, shared across
 * all of the instructions within a bundle.
 *
 * Instructions consuming conditionals (branches and conditional selects)
 * require their condition to be written into the conditional register (r31)
 * within the same bundle they are consumed.
 *
 * Fragment writeout requires its argument to be written in full within the
 * same bundle as the branch, with no hanging dependencies.
 *
 * Load/store instructions are also in bundles of simply two instructions, and
 * texture instructions have no bundling.
 *
 * -------------------------------------------------------------------------
 */
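/* As an illustrative (hypothetical) example of the model above: one bundle
 * might issue a vector multiply in VMUL alongside a scalar add in SADD on the
 * first line, then a vector add in VADD on the second line that reads the
 * VMUL result through pipeline register r24 without touching the register
 * file. */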
/* We create the dependency graph with per-byte granularity */

#define BYTE_COUNT 16

static void
add_dependency(struct util_dynarray *table, unsigned index, uint16_t mask, midgard_instruction **instructions, unsigned child)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                struct util_dynarray *parents = &table[(BYTE_COUNT * index) + i];

                util_dynarray_foreach(parents, unsigned, parent) {
                        BITSET_WORD *dependents = instructions[*parent]->dependents;

                        /* Already have the dependency */
                        if (BITSET_TEST(dependents, child))
                                continue;

                        BITSET_SET(dependents, child);
                        instructions[child]->nr_dependencies++;
                }
        }
}
static void
mark_access(struct util_dynarray *table, unsigned index, uint16_t mask, unsigned parent)
{
        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
                if (!(mask & (1 << i)))
                        continue;

                util_dynarray_append(&table[(BYTE_COUNT * index) + i], unsigned, parent);
        }
}
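/* Byte-granularity example (hypothetical numbers): a 32-bit vec4 write to
 * node 5 has bytemask 0xffff, so mark_access() appends the writer to all
 * sixteen slots table[5 * BYTE_COUNT + 0] .. table[5 * BYTE_COUNT + 15]. A
 * later read of only component .y (bytes 4-7, mask 0x00f0) consults just
 * those four slots, so add_dependency() records the edge without conflating
 * untouched components. */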
static void
mir_create_dependency_graph(midgard_instruction **instructions, unsigned count, unsigned node_count)
{
        size_t sz = node_count * BYTE_COUNT;

        struct util_dynarray *last_read = calloc(sizeof(struct util_dynarray), sz);
        struct util_dynarray *last_write = calloc(sizeof(struct util_dynarray), sz);

        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_init(&last_read[i], NULL);
                util_dynarray_init(&last_write[i], NULL);
        }
        /* Initialize dependency graph */
        for (unsigned i = 0; i < count; ++i) {
                instructions[i]->dependents =
                        calloc(BITSET_WORDS(count), sizeof(BITSET_WORD));

                instructions[i]->nr_dependencies = 0;
        }
        /* Populate dependency graph */
        for (signed i = count - 1; i >= 0; --i) {
                if (instructions[i]->compact_branch)
                        continue;

                unsigned dest = instructions[i]->dest;
                unsigned mask = mir_bytemask(instructions[i]);

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                add_dependency(last_write, src, readmask, instructions, i);
                        }
                }

                if (dest < node_count) {
                        add_dependency(last_read, dest, mask, instructions, i);
                        add_dependency(last_write, dest, mask, instructions, i);
                        mark_access(last_write, dest, mask, i);
                }

                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                mark_access(last_read, src, readmask, i);
                        }
                }
        }
        /* If there is a branch, all instructions depend on it, as interblock
         * execution must be purely in-order */

        if (instructions[count - 1]->compact_branch) {
                BITSET_WORD *dependents = instructions[count - 1]->dependents;

                for (signed i = count - 2; i >= 0; --i) {
                        if (BITSET_TEST(dependents, i))
                                continue;

                        BITSET_SET(dependents, i);
                        instructions[i]->nr_dependencies++;
                }
        }
        /* Free the intermediate structures */
        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_fini(&last_read[i]);
                util_dynarray_fini(&last_write[i]);
        }

        free(last_read);
        free(last_write);
}
/* Does the mask cover more than a scalar? */

static bool
is_single_component_mask(unsigned mask)
{
        int components = 0;

        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        components++;
        }

        return components == 1;
}
/* Helpers for scheduling */

static bool
mir_is_scalar(midgard_instruction *ains)
{
        /* Do we try to use it as a vector op? */
        if (!is_single_component_mask(ains->mask))
                return false;

        /* Otherwise, check mode hazards */
        bool could_scalar = true;
        unsigned szd = nir_alu_type_get_type_size(ains->dest_type);
        unsigned sz0 = nir_alu_type_get_type_size(ains->src_types[0]);
        unsigned sz1 = nir_alu_type_get_type_size(ains->src_types[1]);

        /* Only 16/32-bit can run on a scalar unit */
        could_scalar &= (szd == 16) || (szd == 32);

        if (ains->src[0] != ~0)
                could_scalar &= (sz0 == 16) || (sz0 == 32);

        if (ains->src[1] != ~0)
                could_scalar &= (sz1 == 16) || (sz1 == 32);

        return could_scalar;
}
/* How many bytes does this ALU instruction add to the bundle? */

static unsigned
bytes_for_instruction(midgard_instruction *ains)
{
        if (ains->unit & UNITS_ANY_VECTOR)
                return sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
        else if (ains->unit == ALU_ENAB_BRANCH)
                return sizeof(midgard_branch_extended);
        else if (ains->compact_branch)
                return sizeof(ains->br_compact);
        else
                return sizeof(midgard_reg_info) + sizeof(midgard_scalar_alu);
}
/* We would like to flatten the linked list of midgard_instructions in a bundle
 * to an array of pointers on the heap for easy indexing */

static midgard_instruction **
flatten_mir(midgard_block *block, unsigned *len)
{
        *len = list_length(&block->base.instructions);

        if (!(*len))
                return NULL;

        midgard_instruction **instructions =
                calloc(sizeof(midgard_instruction *), *len);

        unsigned i = 0;

        mir_foreach_instr_in_block(block, ins)
                instructions[i++] = ins;

        return instructions;
}
/* The worklist is the set of instructions that can be scheduled now; that is,
 * the set of instructions with no remaining dependencies */

static void
mir_initialize_worklist(BITSET_WORD *worklist, midgard_instruction **instructions, unsigned count)
{
        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->nr_dependencies == 0)
                        BITSET_SET(worklist, i);
        }
}
/* Update the worklist after an instruction terminates. Remove its edges from
 * the graph and if that causes any node to have no dependencies, add it to the
 * worklist */

static void
mir_update_worklist(
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions, midgard_instruction *done)
{
        /* Sanity check: if no instruction terminated, there is nothing to do.
         * If the instruction that terminated had dependencies, that makes no
         * sense and means we messed up the worklist. Finally, as the purpose
         * of this routine is to update dependents, we abort early if there are
         * no dependents defined. */

        if (!done)
                return;

        assert(done->nr_dependencies == 0);

        if (!done->dependents)
                return;

        /* We have an instruction with dependents. Iterate each dependent to
         * remove one dependency (`done`), adding dependents to the worklist
         * where possible. */

        unsigned i;
        BITSET_FOREACH_SET(i, done->dependents, count) {
                assert(instructions[i]->nr_dependencies);

                if (!(--instructions[i]->nr_dependencies))
                        BITSET_SET(worklist, i);
        }

        free(done->dependents);
}
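/* Worked example (hypothetical): suppose instruction A is read by later
 * instructions B and C and by nothing else, so A->nr_dependencies == 2.
 * Scheduling proceeds bottom-up, so once B and then C terminate, the two
 * corresponding calls here decrement A's count to zero and set A's worklist
 * bit, making A eligible to be chosen next. */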
/* While scheduling, we need to choose instructions satisfying certain
 * criteria. As we schedule backwards, we choose the *last* instruction in the
 * worklist to simulate in-order scheduling. Chosen instructions must satisfy a
 * given predicate. */

struct midgard_predicate {
        /* TAG or ~0 for dont-care */
        unsigned tag;

        /* True if we want to pop off the chosen instruction */
        bool destructive;

        /* For ALU, choose only this unit */
        unsigned unit;

        /* State for bundle constants. constants is the actual constants
         * for the bundle. constant_mask tracks which of the 16 constant
         * bytes are currently in use. When picking in destructive mode, the
         * constants array will be updated, and the instruction will be
         * adjusted to index into the constants array */

        midgard_constants *constants;
        unsigned constant_mask;
        bool blend_constant;

        /* Exclude this destination (if not ~0) */
        unsigned exclude;

        /* Don't schedule instructions consuming conditionals (since we already
         * scheduled one). Excludes conditional branches and csel */
        bool no_cond;

        /* Require (or reject) a minimal mask and (if nonzero) given
         * destination. Used for writeout optimizations */
        unsigned mask;
        unsigned no_mask;
        unsigned dest;

        /* For VADD/VLUT, whether to only/never schedule imov/fmov
         * instructions. This allows non-move instructions to get priority
         * on each unit */
        bool moves;

        /* For load/store: how many pipeline registers are in use? The two
         * scheduled instructions cannot use more than the 256-bits of pipeline
         * space available or RA will fail (as it would run out of pipeline
         * registers and fail to spill without breaking the schedule) */
        unsigned pipeline_count;
};
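/* Illustrative (hypothetical) predicate use: to pull a scalar op into the
 * SADD slot, a caller would set .tag = TAG_ALU_4, .unit = UNIT_SADD and
 * .destructive = true before calling mir_choose_instruction(); the same
 * predicate with .destructive = false merely asks whether anything could be
 * scheduled there, leaving the worklist untouched. */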
static bool
mir_adjust_constant(midgard_instruction *ins, unsigned src,
                unsigned *bundle_constant_mask,
                unsigned *comp_mapping,
                uint8_t *bundle_constants,
                bool upper)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->src_types[src]) / 8;
        unsigned type_shift = util_logbase2(type_size);
        unsigned max_comp = mir_components_for_type(ins->src_types[src]);
        unsigned comp_mask = mir_from_bytemask(mir_round_bytemask_up(
                                mir_bytemask_of_read_components_index(ins, src),
                                type_size * 8),
                                type_size * 8);
        unsigned type_mask = (1 << type_size) - 1;

        /* Upper only makes sense for 16-bit */
        if (type_size != 2 && upper)
                return false;

        /* For 16-bit, we need to stay on either the upper or lower half to
         * avoid disrupting the swizzle */
        unsigned start = upper ? 8 : 0;
        unsigned length = (type_size == 2) ? 8 : 16;

        for (unsigned comp = 0; comp < max_comp; comp++) {
                if (!(comp_mask & (1 << comp)))
                        continue;

                uint8_t *constantp = ins->constants.u8 + (type_size * comp);
                unsigned best_reuse_bytes = 0;
                signed best_place = -1;
                unsigned i, j;

                for (i = start; i < (start + length); i += type_size) {
                        unsigned reuse_bytes = 0;

                        for (j = 0; j < type_size; j++) {
                                if (!(*bundle_constant_mask & (1 << (i + j))))
                                        continue;
                                if (constantp[j] != bundle_constants[i + j])
                                        break;
                                if ((i + j) > (start + length))
                                        break;

                                reuse_bytes++;
                        }

                        /* Select the place where existing bytes can be
                         * reused so we leave empty slots to others
                         */
                        if (j == type_size &&
                            (reuse_bytes > best_reuse_bytes || best_place < 0)) {
                                best_reuse_bytes = reuse_bytes;
                                best_place = i;
                                break;
                        }
                }

                /* This component couldn't fit in the remaining constant slots;
                 * no need to check the remaining components, bail out now
                 */
                if (best_place < 0)
                        return false;

                memcpy(&bundle_constants[i], constantp, type_size);
                *bundle_constant_mask |= type_mask << best_place;
                comp_mapping[comp] = best_place >> type_shift;
        }

        return true;
}
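/* Packing example (hypothetical): with an empty bundle, a 32-bit constant
 * component read by src 0 lands in bytes 0-3, so comp_mapping records slot 0
 * for it; a second, distinct 32-bit component then occupies bytes 4-7 (slot
 * 1). mir_adjust_constants() below composes these slot indices into the
 * source swizzle so the instruction reads the shared embedded-constant
 * register correctly. */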
/* For an instruction that can fit, adjust it to fit and update the constants
 * array, in destructive mode. Returns whether the fitting was successful. */

static bool
mir_adjust_constants(midgard_instruction *ins,
                struct midgard_predicate *pred,
                bool destructive)
{
        /* Blend constants dominate */
        if (ins->has_blend_constant) {
                if (pred->constant_mask)
                        return false;
                else if (destructive) {
                        pred->blend_constant = true;
                        pred->constant_mask = 0xffff;
                        return true;
                }
        }

        /* No constant, nothing to adjust */
        if (!ins->has_constants)
                return true;

        unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
        unsigned bundle_constant_mask = pred->constant_mask;
        unsigned comp_mapping[2][16] = { };
        uint8_t bundle_constants[16];

        memcpy(bundle_constants, pred->constants, 16);

        /* Let's try to find a place for each active component of the constant
         * register. */
        for (unsigned src = 0; src < 2; ++src) {
                if (ins->src[src] != SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                        continue;

                /* First, try lower half (or whole for !16) */
                if (mir_adjust_constant(ins, src, &bundle_constant_mask,
                                comp_mapping[src], bundle_constants, false))
                        continue;

                /* Next, try upper half */
                if (mir_adjust_constant(ins, src, &bundle_constant_mask,
                                comp_mapping[src], bundle_constants, true))
                        continue;

                /* Otherwise, bail */
                return false;
        }

        /* If non-destructive, we're done */
        if (!destructive)
                return true;

        /* Otherwise update the constant_mask and constant values */
        pred->constant_mask = bundle_constant_mask;
        memcpy(pred->constants, bundle_constants, 16);

        /* Use comp_mapping as a swizzle */
        mir_foreach_src(ins, s) {
                if (ins->src[s] == r_constant)
                        mir_compose_swizzle(ins->swizzle[s], comp_mapping[s], ins->swizzle[s]);
        }

        return true;
}
/* Conservative estimate of the pipeline registers required for load/store */

static unsigned
mir_pipeline_count(midgard_instruction *ins)
{
        unsigned bytecount = 0;

        mir_foreach_src(ins, i) {
                /* Skip empty source */
                if (ins->src[i] == ~0) continue;

                unsigned bytemask = mir_bytemask_of_read_components_index(ins, i);

                unsigned max = util_logbase2(bytemask) + 1;
                bytecount += max;
        }

        return DIV_ROUND_UP(bytecount, 16);
}
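/* Estimate example (hypothetical operands): a load/store op reading two full
 * 16-byte sources gives bytecount = 16 + 16 = 32, so the estimate is
 * DIV_ROUND_UP(32, 16) = 2 pipeline registers, exhausting the budget of 2
 * enforced by the ldst check in mir_choose_instruction(). */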
/* Matches FADD x, x with modifiers compatible. Since x + x = x * 2, this
 * holds for any x, including x of the form f(y) for some swizzle/abs/neg
 * function f */

static bool
mir_is_add_2(midgard_instruction *ins)
{
        if (ins->alu.op != midgard_alu_op_fadd)
                return false;

        if (ins->src[0] != ins->src[1])
                return false;

        if (ins->src_types[0] != ins->src_types[1])
                return false;

        for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i) {
                if (ins->swizzle[0][i] != ins->swizzle[1][i])
                        return false;
        }

        if (ins->src_abs[0] != ins->src_abs[1])
                return false;

        if (ins->src_neg[0] != ins->src_neg[1])
                return false;

        return true;
}
static void
mir_adjust_unit(midgard_instruction *ins, unsigned unit)
{
        /* FADD x, x = FMUL x, #2 */
        if (mir_is_add_2(ins) && (unit & (UNITS_MUL | UNIT_VLUT))) {
                ins->alu.op = midgard_alu_op_fmul;

                ins->src[1] = ~0;
                ins->src_abs[1] = false;
                ins->src_neg[1] = false;

                ins->has_inline_constant = true;
                ins->inline_constant = _mesa_float_to_half(2.0);
        }
}
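/* The rewrite above is exact: in IEEE-754 arithmetic, x + x and x * 2.0
 * produce identical results (both scale the significand without rounding),
 * so moving an FADD x, x onto a multiplier or the LUT unit never changes
 * the computed value. */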
static bool
mir_has_unit(midgard_instruction *ins, unsigned unit)
{
        if (alu_opcode_props[ins->alu.op].props & unit)
                return true;

        /* FADD x, x can run on any adder or any multiplier */
        if (mir_is_add_2(ins))
                return true;

        return false;
}
/* Net change in liveness if an instruction were scheduled. Loosely based on
 * ir3's scheduler. */

static int
mir_live_effect(uint16_t *liveness, midgard_instruction *ins, bool destructive)
{
        /* TODO: what if dest is used multiple times? */
        int free_live = 0;

        if (ins->dest < SSA_FIXED_MINIMUM) {
                unsigned bytemask = mir_bytemask(ins);
                bytemask = util_next_power_of_two(bytemask + 1) - 1;
                free_live += util_bitcount(liveness[ins->dest] & bytemask);

                if (destructive)
                        liveness[ins->dest] &= ~bytemask;
        }

        int new_live = 0;

        mir_foreach_src(ins, s) {
                unsigned S = ins->src[s];

                bool dupe = false;

                for (unsigned q = 0; q < s; ++q)
                        dupe |= (ins->src[q] == S);

                if (dupe)
                        continue;

                if (S < SSA_FIXED_MINIMUM) {
                        unsigned bytemask = mir_bytemask_of_read_components(ins, S);
                        bytemask = util_next_power_of_two(bytemask + 1) - 1;

                        /* Count only the new components */
                        new_live += util_bitcount(bytemask & ~(liveness[S]));

                        if (destructive)
                                liveness[S] |= bytemask;
                }
        }

        return new_live - free_live;
}
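/* Worked example (hypothetical masks): if an instruction's dest frees 4
 * currently-live bytes while its distinct sources would make 8 new bytes
 * live, the net effect is 8 - 4 = +4; the chooser below prefers candidates
 * with smaller (ideally negative) effects to hold down register pressure. */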
static midgard_instruction *
mir_choose_instruction(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned count,
                struct midgard_predicate *predicate)
{
        /* Parse the predicate */
        unsigned tag = predicate->tag;
        bool alu = tag == TAG_ALU_4;
        bool ldst = tag == TAG_LOAD_STORE_4;
        unsigned unit = predicate->unit;
        bool branch = alu && (unit == ALU_ENAB_BR_COMPACT);
        bool scalar = (unit != ~0) && (unit & UNITS_SCALAR);
        bool no_cond = predicate->no_cond;

        unsigned mask = predicate->mask;
        unsigned dest = predicate->dest;
        bool needs_dest = mask & 0xF;

        /* Iterate to find the best instruction satisfying the predicate */
        unsigned i;

        signed best_index = -1;
        signed best_effect = INT_MAX;
        bool best_conditional = false;

        /* Enforce a simple metric limiting distance to keep down register
         * pressure. TODO: replace with liveness tracking for much better
         * results */

        unsigned max_active = 0;
        unsigned max_distance = 36;

        BITSET_FOREACH_SET(i, worklist, count) {
                max_active = MAX2(max_active, i);
        }

        BITSET_FOREACH_SET(i, worklist, count) {
                bool is_move = alu &&
                        (instructions[i]->alu.op == midgard_alu_op_imov ||
                         instructions[i]->alu.op == midgard_alu_op_fmov);

                if ((max_active - i) >= max_distance)
                        continue;

                if (tag != ~0 && instructions[i]->type != tag)
                        continue;

                if (predicate->exclude != ~0 && instructions[i]->dest == predicate->exclude)
                        continue;

                if (alu && !branch && !(mir_has_unit(instructions[i], unit)))
                        continue;

                if ((unit == UNIT_VLUT || unit == UNIT_VADD) && (predicate->moves != is_move))
                        continue;

                if (branch && !instructions[i]->compact_branch)
                        continue;

                if (alu && scalar && !mir_is_scalar(instructions[i]))
                        continue;

                if (alu && !mir_adjust_constants(instructions[i], predicate, false))
                        continue;

                if (needs_dest && instructions[i]->dest != dest)
                        continue;

                if (mask && ((~instructions[i]->mask) & mask))
                        continue;

                if (instructions[i]->mask & predicate->no_mask)
                        continue;

                if (ldst && mir_pipeline_count(instructions[i]) + predicate->pipeline_count > 2)
                        continue;

                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->alu.op);
                conditional |= (branch && instructions[i]->branch.conditional);

                if (conditional && no_cond)
                        continue;

                int effect = mir_live_effect(liveness, instructions[i], false);

                if (effect > best_effect)
                        continue;

                if (effect == best_effect && (signed) i < best_index)
                        continue;

                best_effect = effect;
                best_index = i;
                best_conditional = conditional;
        }

        /* Did we find anything? */

        if (best_index < 0)
                return NULL;

        /* If we found something, remove it from the worklist */
        assert(best_index < count);

        if (predicate->destructive) {
                BITSET_CLEAR(worklist, best_index);

                if (alu)
                        mir_adjust_constants(instructions[best_index], predicate, true);

                if (ldst)
                        predicate->pipeline_count += mir_pipeline_count(instructions[best_index]);

                if (alu)
                        mir_adjust_unit(instructions[best_index], unit);

                /* Once we schedule a conditional, we can't again */
                predicate->no_cond |= best_conditional;
                mir_live_effect(liveness, instructions[best_index], true);
        }

        return instructions[best_index];
}
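/* Selection policy recap: among candidates passing every filter above, the
 * lowest liveness effect wins, and ties go to the highest worklist index,
 * i.e. the latest instruction in source order, which simulates in-order
 * issue since bundles are scheduled bottom-up. */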
/* Still, we don't choose instructions in a vacuum. We need a way to choose the
 * best bundle type (ALU, load/store, texture). Nondestructive. */

static unsigned
mir_choose_bundle(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned count)
{
        /* At the moment, our algorithm is very simple - use the bundle of the
         * best instruction, regardless of what else could be scheduled
         * alongside it. This is not optimal but it works okay for in-order */

        struct midgard_predicate predicate = {
                .tag = ~0,
                .destructive = false,
                .exclude = ~0
        };

        midgard_instruction *chosen = mir_choose_instruction(instructions, liveness, worklist, count, &predicate);

        if (chosen)
                return chosen->type;
        else
                return ~0;
}
/* We want to choose an ALU instruction filling a given unit */

static void
mir_choose_alu(midgard_instruction **slot,
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len,
                struct midgard_predicate *predicate,
                unsigned unit)
{
        /* Did we already schedule to this slot? */
        if ((*slot) != NULL)
                return;

        /* Try to schedule something, if not */
        predicate->unit = unit;
        *slot = mir_choose_instruction(instructions, liveness, worklist, len, predicate);

        /* Store unit upon scheduling */
        if (*slot && !((*slot)->compact_branch))
                (*slot)->unit = unit;
}
/* When we are scheduling a branch/csel, we need the consumed condition in the
 * same block as a pipeline register. There are two options to enable this:
 *
 *  - Move the conditional into the bundle. Preferred, but only works if the
 *    conditional is used only once and is from this block.
 *  - Copy the conditional.
 *
 * We search for the conditional. If it's in this block, single-use, and
 * without embedded constants, we schedule it immediately. Otherwise, we
 * schedule a move for it.
 *
 * mir_comparison_mobile is a helper to find the moveable condition.
 */

static unsigned
mir_comparison_mobile(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                unsigned count,
                unsigned cond)
{
        if (!mir_single_use(ctx, cond))
                return ~0;

        unsigned ret = ~0;

        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->dest != cond)
                        continue;

                /* Must fit in an ALU bundle */
                if (instructions[i]->type != TAG_ALU_4)
                        return ~0;

                /* If it would itself require a condition, that's recursive */
                if (OP_IS_CSEL(instructions[i]->alu.op))
                        return ~0;

                /* We'll need to rewrite to .w but that doesn't work for vector
                 * ops that don't replicate (ball/bany), so bail there */

                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->alu.op].props))
                        return ~0;

                /* Ensure it will fit with constants */

                if (!mir_adjust_constants(instructions[i], predicate, false))
                        return ~0;

                /* Ensure it is written only once */

                if (ret != ~0)
                        return ~0;
                else
                        ret = i;
        }

        /* Inject constants now that we are sure we want to */

        if (ret != ~0)
                mir_adjust_constants(instructions[ret], predicate, true);

        return ret;
}
/* Using the information about the moveable conditional itself, we either pop
 * that condition off the worklist for use now, or create a move to
 * artificially schedule instead as a fallback */

static midgard_instruction *
mir_schedule_comparison(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                unsigned cond, bool vector, unsigned *swizzle,
                midgard_instruction *user)
{
        /* TODO: swizzle when scheduling */
        unsigned comp_i =
                (!vector && (swizzle[0] == 0)) ?
                mir_comparison_mobile(ctx, instructions, predicate, count, cond) : ~0;

        /* If we can, schedule the condition immediately */
        if ((comp_i != ~0) && BITSET_TEST(worklist, comp_i)) {
                assert(comp_i < count);
                BITSET_CLEAR(worklist, comp_i);
                return instructions[comp_i];
        }

        /* Otherwise, we insert a move */

        midgard_instruction mov = v_mov(cond, cond);
        mov.mask = vector ? 0xF : 0x1;
        memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));

        return mir_insert_instruction_before(ctx, user, mov);
}
/* Most generally, we need instructions writing to r31 in the appropriate
 * components */

static midgard_instruction *
mir_schedule_condition(compiler_context *ctx,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions,
                midgard_instruction *last)
{
        /* For a branch, the condition is the only argument; for csel, third */
        bool branch = last->compact_branch;
        unsigned condition_index = branch ? 0 : 2;

        /* csel_v is vector; otherwise, conditions are scalar */
        bool vector = !branch && OP_IS_CSEL_V(last->alu.op);

        /* Grab the conditional instruction */

        midgard_instruction *cond = mir_schedule_comparison(
                        ctx, instructions, predicate, worklist, count, last->src[condition_index],
                        vector, last->swizzle[2], last);

        /* We have exclusive reign over this (possibly move) conditional
         * instruction. We can rewrite into a pipeline conditional register */

        predicate->exclude = cond->dest;
        cond->dest = SSA_FIXED_REGISTER(31);

        if (!vector) {
                cond->mask = (1 << COMPONENT_W);

                mir_foreach_src(cond, s) {
                        if (cond->src[s] == ~0)
                                continue;

                        for (unsigned q = 0; q < 4; ++q)
                                cond->swizzle[s][q + COMPONENT_W] = cond->swizzle[s][q];
                }
        }

        /* Schedule the unit: csel is always in the latter pipeline, so a csel
         * condition must be in the former pipeline stage (vmul/sadd),
         * depending on scalar/vector of the instruction itself. A branch must
         * be written from the latter pipeline stage and a branch condition is
         * always scalar, so it is always in smul (exception: ball/bany, which
         * will be vadd) */

        if (branch)
                cond->unit = UNIT_SMUL;
        else
                cond->unit = vector ? UNIT_VMUL : UNIT_SADD;

        return cond;
}
/* Schedules a single bundle of the given type */

static midgard_bundle
mir_schedule_texture(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len,
                bool is_vertex)
{
        struct midgard_predicate predicate = {
                .tag = TAG_TEXTURE_4,
                .destructive = true,
                .exclude = ~0
        };

        midgard_instruction *ins =
                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

        mir_update_worklist(worklist, len, instructions, ins);

        struct midgard_bundle out = {
                .tag = ins->texture.op == TEXTURE_OP_BARRIER ?
                        TAG_TEXTURE_4_BARRIER : is_vertex ?
                        TAG_TEXTURE_4_VTX : TAG_TEXTURE_4,
                .instruction_count = 1,
                .instructions = { ins }
        };

        return out;
}
static midgard_bundle
mir_schedule_ldst(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_predicate predicate = {
                .tag = TAG_LOAD_STORE_4,
                .destructive = true,
                .exclude = ~0
        };

        /* Try to pick two load/store ops. Second not guaranteed to exist */

        midgard_instruction *ins =
                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

        midgard_instruction *pair =
                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

        struct midgard_bundle out = {
                .tag = TAG_LOAD_STORE_4,
                .instruction_count = pair ? 2 : 1,
                .instructions = { ins, pair }
        };

        /* We have to update the worklist atomically, since the two
         * instructions run concurrently (TODO: verify it's not pipelined) */

        mir_update_worklist(worklist, len, instructions, ins);
        mir_update_worklist(worklist, len, instructions, pair);

        return out;
}
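/* Pairing example (hypothetical): if the first chosen op already needs both
 * pipeline registers (mir_pipeline_count() == 2), the second call returns
 * NULL because the predicate's pipeline_count budget is exhausted, and the
 * bundle is emitted with instruction_count == 1. */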
static void
mir_schedule_zs_write(
                compiler_context *ctx,
                struct midgard_predicate *predicate,
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len,
                midgard_instruction *branch,
                midgard_instruction **smul,
                midgard_instruction **vadd,
                midgard_instruction **vlut,
                bool stencil)
{
        bool success = false;
        unsigned idx = stencil ? 3 : 2;
        unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(1) : branch->src[idx];

        predicate->dest = src;
        predicate->mask = 0x1;

        midgard_instruction **units[] = { smul, vadd, vlut };
        unsigned unit_names[] = { UNIT_SMUL, UNIT_VADD, UNIT_VLUT };

        for (unsigned i = 0; i < 3; ++i) {
                if (*(units[i]))
                        continue;

                predicate->unit = unit_names[i];
                midgard_instruction *ins =
                        mir_choose_instruction(instructions, liveness, worklist, len, predicate);

                if (ins) {
                        ins->unit = unit_names[i];
                        *(units[i]) = ins;
                        success = true;
                        break;
                }
        }

        predicate->dest = predicate->mask = 0;

        if (success)
                return;

        /* Otherwise, emit a move to fill the slot */
        midgard_instruction *mov = ralloc(ctx, midgard_instruction);
        *mov = v_mov(src, make_compiler_temp(ctx));
        mov->mask = 0x1;

        branch->src[idx] = mov->dest;

        unsigned swizzle = (branch->src[0] == ~0) ? COMPONENT_Y : COMPONENT_X;

        for (unsigned c = 0; c < 16; ++c)
                mov->swizzle[1][c] = swizzle;

        for (unsigned i = 0; i < 3; ++i) {
                if (!(*(units[i]))) {
                        *(units[i]) = mov;
                        mov->unit = unit_names[i];
                        return;
                }
        }

        unreachable("Could not schedule Z/S move to any unit");
}
static midgard_bundle
mir_schedule_alu(
                compiler_context *ctx,
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len)
{
        struct midgard_bundle bundle = {};

        unsigned bytes_emitted = sizeof(bundle.control);

        struct midgard_predicate predicate = {
                .tag = TAG_ALU_4,
                .destructive = true,
                .exclude = ~0,
                .constants = &bundle.constants
        };

        midgard_instruction *vmul = NULL;
        midgard_instruction *vadd = NULL;
        midgard_instruction *vlut = NULL;
        midgard_instruction *smul = NULL;
        midgard_instruction *sadd = NULL;
        midgard_instruction *branch = NULL;

        mir_choose_alu(&branch, instructions, liveness, worklist, len, &predicate, ALU_ENAB_BR_COMPACT);
        mir_update_worklist(worklist, len, instructions, branch);
        unsigned writeout = branch ? branch->writeout : 0;

        if (branch && branch->branch.conditional) {
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, branch);

                if (cond->unit == UNIT_VADD)
                        vadd = cond;
                else if (cond->unit == UNIT_SMUL)
                        smul = cond;
                else
                        unreachable("Bad condition");
        }
        /* If we have a render target reference, schedule a move for it. Since
         * this will be in sadd, we boost this to prevent scheduling csel into
         * sadd */

        if (writeout && (branch->constants.u32[0] || ctx->is_blend)) {
                sadd = ralloc(ctx, midgard_instruction);
                *sadd = v_mov(~0, make_compiler_temp(ctx));
                sadd->unit = UNIT_SADD;
                sadd->mask = 0x1;
                sadd->has_inline_constant = true;
                sadd->inline_constant = branch->constants.u32[0];
                branch->src[1] = sadd->dest;
                branch->src_types[1] = sadd->dest_type;

                /* Mask off any conditionals. Could be optimized to just scalar
                 * conditionals TODO */
                predicate.no_cond = true;
        }

        if (branch)
                bundle.last_writeout = branch->last_writeout;
        /* When MRT is in use, writeout loops require r1.w to be filled (with a
         * return address? by symmetry with Bifrost, etc), at least for blend
         * shaders to work properly. When MRT is not in use (including on SFBD
         * GPUs), this is not needed. Blend shaders themselves don't know if
         * they are paired with MRT or not so they always need this, at least
         * on MFBD GPUs. */

        if (writeout && (ctx->is_blend || ctx->writeout_branch[1])) {
                vadd = ralloc(ctx, midgard_instruction);
                *vadd = v_mov(~0, make_compiler_temp(ctx));

                if (!ctx->is_blend) {
                        vadd->alu.op = midgard_alu_op_iadd;
                        vadd->src[0] = SSA_FIXED_REGISTER(31);
                        vadd->src_types[0] = nir_type_uint32;

                        for (unsigned c = 0; c < 16; ++c)
                                vadd->swizzle[0][c] = COMPONENT_X;

                        vadd->has_inline_constant = true;
                        vadd->inline_constant = 0;
                } else {
                        vadd->src[1] = SSA_FIXED_REGISTER(1);
                        vadd->src_types[0] = nir_type_uint32;

                        for (unsigned c = 0; c < 16; ++c)
                                vadd->swizzle[1][c] = COMPONENT_W;
                }

                vadd->unit = UNIT_VADD;
                vadd->mask = 0x1;
                branch->dest = vadd->dest;
                branch->dest_type = vadd->dest_type;
        }

        if (writeout & PAN_WRITEOUT_Z)
                mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, false);

        if (writeout & PAN_WRITEOUT_S)
                mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, true);
        mir_choose_alu(&smul, instructions, liveness, worklist, len, &predicate, UNIT_SMUL);

        for (unsigned moves = 0; moves < 2; ++moves) {
                predicate.moves = moves;
                predicate.no_mask = writeout ? (1 << 3) : 0;
                mir_choose_alu(&vlut, instructions, liveness, worklist, len, &predicate, UNIT_VLUT);
                predicate.no_mask = 0;
                mir_choose_alu(&vadd, instructions, liveness, worklist, len, &predicate, UNIT_VADD);
        }

        mir_update_worklist(worklist, len, instructions, vlut);
        mir_update_worklist(worklist, len, instructions, vadd);
        mir_update_worklist(worklist, len, instructions, smul);
        bool vadd_csel = vadd && OP_IS_CSEL(vadd->alu.op);
        bool smul_csel = smul && OP_IS_CSEL(smul->alu.op);

        if (vadd_csel || smul_csel) {
                midgard_instruction *ins = vadd_csel ? vadd : smul;
                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, ins);

                if (cond->unit == UNIT_VMUL)
                        vmul = cond;
                else if (cond->unit == UNIT_SADD)
                        sadd = cond;
                else
                        unreachable("Bad condition");
        }

        /* Stage 2, let's schedule sadd before vmul for writeout */
        mir_choose_alu(&sadd, instructions, liveness, worklist, len, &predicate, UNIT_SADD);
        /* Check if writeout reads its own register */

        if (writeout) {
                midgard_instruction *stages[] = { sadd, vadd, smul, vlut };
                unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : branch->src[0];
                unsigned writeout_mask = 0x0;
                bool bad_writeout = false;

                for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                        if (!stages[i])
                                continue;

                        if (stages[i]->dest != src)
                                continue;

                        writeout_mask |= stages[i]->mask;
                        bad_writeout |= mir_has_arg(stages[i], branch->src[0]);
                }

                /* It's possible we'll be able to schedule something into vmul
                 * to fill r0. Let's peek into the future, trying to schedule
                 * vmul specially that way. */

                unsigned full_mask = 0xF;

                if (!bad_writeout && writeout_mask != full_mask) {
                        predicate.unit = UNIT_VMUL;
                        predicate.dest = src;
                        predicate.mask = writeout_mask ^ full_mask;

                        struct midgard_instruction *peaked =
                                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

                        if (peaked) {
                                vmul = peaked;
                                vmul->unit = UNIT_VMUL;
                                writeout_mask |= predicate.mask;
                                assert(writeout_mask == full_mask);
                        }

                        /* Cleanup */
                        predicate.dest = predicate.mask = 0;
                }

                /* Finally, add a move if necessary */
                if (bad_writeout || writeout_mask != full_mask) {
                        unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);

                        vmul = ralloc(ctx, midgard_instruction);
                        *vmul = v_mov(src, temp);
                        vmul->unit = UNIT_VMUL;
                        vmul->mask = full_mask ^ writeout_mask;

                        /* Rewrite to use our temp */

                        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                                if (stages[i])
                                        mir_rewrite_index_dst_single(stages[i], src, temp);
                        }

                        mir_rewrite_index_src_single(branch, src, temp);
                }
        }
        mir_choose_alu(&vmul, instructions, liveness, worklist, len, &predicate, UNIT_VMUL);

        mir_update_worklist(worklist, len, instructions, vmul);
        mir_update_worklist(worklist, len, instructions, sadd);

        bundle.has_blend_constant = predicate.blend_constant;
        bundle.has_embedded_constants = predicate.constant_mask != 0;

        unsigned padding = 0;

        /* Now that we have finished scheduling, build up the bundle */
        midgard_instruction *stages[] = { vmul, sadd, vadd, smul, vlut, branch };

        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
                if (stages[i]) {
                        bundle.control |= stages[i]->unit;
                        bytes_emitted += bytes_for_instruction(stages[i]);
                        bundle.instructions[bundle.instruction_count++] = stages[i];

                        /* If we branch, we can't spill to TLS since the store
                         * instruction will never get executed. We could try to
                         * break the bundle but this is probably easier for
                         * now */

                        if (branch)
                                stages[i]->no_spill |= (1 << REG_CLASS_WORK);
                }
        }

        /* Pad ALU op to nearest quadword */

        if (bytes_emitted & 15) {
                padding = 16 - (bytes_emitted & 15);
                bytes_emitted += padding;
        }

        /* Constants must always be quadwords */
        if (bundle.has_embedded_constants)
                bytes_emitted += 16;

        /* Size ALU instruction for tag */
        bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;

        /* MRT capable GPUs use a special writeout procedure */
        if (writeout && !(ctx->quirks & MIDGARD_NO_UPPER_ALU))
                bundle.tag += 4;

        bundle.padding = padding;
        bundle.control |= bundle.tag;

        return bundle;
}
/* Schedule a single block by iterating its instructions to create bundles.
 * As we go, tally the bundle sizes to compute the block size. */

static void
schedule_block(compiler_context *ctx, midgard_block *block)
{
        /* Copy list to dynamic array */
        unsigned len = 0;
        midgard_instruction **instructions = flatten_mir(block, &len);

        if (!len)
                return;

        /* Calculate dependencies and initial worklist */
        unsigned node_count = ctx->temp_count + 1;
        mir_create_dependency_graph(instructions, len, node_count);

        /* Allocate the worklist */
        size_t sz = BITSET_WORDS(len) * sizeof(BITSET_WORD);
        BITSET_WORD *worklist = calloc(sz, 1);
        uint16_t *liveness = calloc(node_count, 2);
        mir_initialize_worklist(worklist, instructions, len);

        struct util_dynarray bundles;
        util_dynarray_init(&bundles, NULL);

        block->quadword_count = 0;
        unsigned blend_offset = 0;

        for (;;) {
                unsigned tag = mir_choose_bundle(instructions, liveness, worklist, len);
                midgard_bundle bundle;

                if (tag == TAG_TEXTURE_4)
                        bundle = mir_schedule_texture(instructions, liveness, worklist, len, ctx->stage != MESA_SHADER_FRAGMENT);
                else if (tag == TAG_LOAD_STORE_4)
                        bundle = mir_schedule_ldst(instructions, liveness, worklist, len);
                else if (tag == TAG_ALU_4)
                        bundle = mir_schedule_alu(ctx, instructions, liveness, worklist, len);
                else
                        break;

                util_dynarray_append(&bundles, midgard_bundle, bundle);

                if (bundle.has_blend_constant)
                        blend_offset = block->quadword_count;

                block->quadword_count += midgard_tag_props[bundle.tag].size;
        }
        /* We emitted bundles backwards; copy into the block in reverse-order */

        util_dynarray_init(&block->bundles, block);
        util_dynarray_foreach_reverse(&bundles, midgard_bundle, bundle) {
                util_dynarray_append(&block->bundles, midgard_bundle, *bundle);
        }
        util_dynarray_fini(&bundles);

        /* The blend constant was emitted backwards as well. blend_offset, if
         * set, is strictly positive, as an offset of zero would imply
         * constants before any instructions, which is invalid in Midgard.
         * TODO: blend constants are broken if you spill, since then
         * quadword_count becomes invalid XXX */

        if (blend_offset)
                ctx->blend_constant_offset = ((ctx->quadword_count + block->quadword_count) - blend_offset - 1) * 0x10;
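/* Offset example (hypothetical counts): with 3 quadwords emitted in prior
 * blocks, a 5-quadword block, and blend_offset == 2, the constants sit at
 * quadword (3 + 5) - 2 - 1 = 5, i.e. byte offset 5 * 0x10 = 0x50 into the
 * shader binary. */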
        block->scheduled = true;
        ctx->quadword_count += block->quadword_count;

        /* Reorder instructions to match the bundled order. First remove the
         * existing instructions and then recreate the list */

        mir_foreach_instr_in_block_safe(block, ins) {
                list_del(&ins->link);
        }

        mir_foreach_instr_in_block_scheduled_rev(block, ins) {
                list_add(&ins->link, &block->base.instructions);
        }

        free(instructions); /* Allocated by flatten_mir() */
}
void
midgard_schedule_program(compiler_context *ctx)
{
        midgard_promote_uniforms(ctx);

        /* Must be lowered right before scheduling */
        mir_squeeze_index(ctx);
        mir_lower_special_reads(ctx);
        mir_squeeze_index(ctx);

        /* Lowering can introduce some dead moves */

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                midgard_opt_dead_move_eliminate(ctx, block);
                schedule_block(ctx, block);
        }
}