/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "midgard_ops.h"
#include "util/u_memory.h"
#include "util/register_allocate.h"
/* Create a mask of accessed components from a swizzle to figure out vector
 * dependencies */

static unsigned
swizzle_to_access_mask(unsigned swizzle)
{
        unsigned component_mask = 0;

        for (int i = 0; i < 4; ++i) {
                unsigned c = (swizzle >> (2 * i)) & 3;
                component_mask |= (1 << c);
        }

        return component_mask;
}
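/* For example, an xxyz swizzle packs components (0, 0, 1, 2) at two bits
 * each, so the mask computed above is 0b0111: x, y and z are accessed while
 * w is not. */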
/* Does the mask cover more than a scalar? */

static bool
is_single_component_mask(unsigned mask)
{
        int components = 0;

        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        components++;
        }

        return components == 1;
}
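/* E.g. a mask of 0x4 (just the third component) counts as a scalar, while
 * 0x3 (two components) or 0x0 (nothing written) does not. */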
/* Checks for an SSA data hazard between two adjacent instructions, keeping in
 * mind that we are a vector architecture and we can write to different
 * components simultaneously */

static bool
can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
{
        /* Each instruction reads some registers and writes to a register. See
         * where the first writes */

        /* Figure out where exactly we wrote to */
        int source = first->ssa_args.dest;
        int source_mask = first->mask;

        /* As long as the second doesn't read from the first, we're okay */
        if (second->ssa_args.src0 == source) {
                if (first->type == TAG_ALU_4) {
                        /* Figure out which components we just read from */

                        int q = second->alu.src1;
                        midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;

                        /* Check if there are components in common, and fail if so */
                        if (swizzle_to_access_mask(m->swizzle) & source_mask)
                                return false;
                } else
                        return false;
        }

        if (second->ssa_args.src1 == source)
                return false;

        /* Otherwise, it's safe in that regard. Another data hazard is both
         * writing to the same place, of course */

        if (second->ssa_args.dest == source) {
                /* ...but only if the components overlap */

                if (second->mask & source_mask)
                        return false;
        }

        return true;
}
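/* As an illustration: an instruction writing only the x component of a value
 * followed by an instruction reading only its y component can still share a
 * bundle; a read of x, or an overlapping write, could not. */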
static bool
midgard_has_hazard(
        midgard_instruction **segment, unsigned segment_size,
        midgard_instruction *ains)
{
        for (int s = 0; s < segment_size; ++s)
                if (!can_run_concurrent_ssa(segment[s], ains))
                        return true;

        return false;
}
/* Schedules, but does not emit, a single basic block. After scheduling, the
 * final tag and size of the block are known, which are necessary for
 * branching */

static midgard_bundle
schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip)
{
        int instructions_emitted = 0, packed_idx = 0;
        midgard_bundle bundle = { 0 };

        uint8_t tag = ins->type;

        /* Default to the instruction's tag */
        bundle.tag = tag;

        switch (ins->type) {
        case TAG_ALU_4: {
                uint32_t control = 0;
                size_t bytes_emitted = sizeof(control);

                /* TODO: Constant combining */
                int index = 0, last_unit = 0;

                /* Previous instructions, for the purpose of parallelism */
                midgard_instruction *segment[4] = {0};
                int segment_size = 0;

                instructions_emitted = -1;
                midgard_instruction *pins = ins;

                unsigned constant_count = 0;

                while (1) {
                        midgard_instruction *ains = pins;

                        /* Advance instruction pointer */
                        if (index) {
                                ains = mir_next_op(pins);
                                pins = ains;
                        }

                        /* Out-of-work condition */
                        if ((struct list_head *) ains == &block->instructions)
                                break;

                        /* Ensure that the chain can continue */
                        if (ains->type != TAG_ALU_4) break;

                        /* If there's already something in the bundle and we
                         * have weird scheduler constraints, break now */
                        if (ains->precede_break && index) break;

                        /* According to the presentation "The ARM
                         * Mali-T880 Mobile GPU" from HotChips 27,
                         * there are two pipeline stages. Branching
                         * position determined experimentally. Lines
                         * are executed in parallel:
                         *
                         * [ VMUL ] [ SADD ]
                         * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ]
                         *
                         * Verify that there are no ordering dependencies here.
                         *
                         * TODO: Allow for parallelism!!!
                         */
                        /* Pick a unit for it if it doesn't force a particular unit */

                        int unit = ains->unit;

                        if (!unit) {
                                int op = ains->alu.op;
                                int units = alu_opcode_props[op].props;

                                bool scalarable = units & UNITS_SCALAR;
                                bool could_scalar = is_single_component_mask(ains->mask);

                                /* Only 16/32-bit can run on a scalar unit */
                                could_scalar &= ains->alu.reg_mode != midgard_reg_mode_8;
                                could_scalar &= ains->alu.reg_mode != midgard_reg_mode_64;
                                could_scalar &= ains->alu.dest_override == midgard_dest_override_none;

                                if (ains->alu.reg_mode == midgard_reg_mode_16) {
                                        /* If we're running in 16-bit mode, we
                                         * can't have any 8-bit sources on the
                                         * scalar unit (since the scalar unit
                                         * doesn't understand 8-bit) */

                                        midgard_vector_alu_src s1 =
                                                vector_alu_from_unsigned(ains->alu.src1);

                                        could_scalar &= !s1.half;

                                        if (!ains->ssa_args.inline_constant) {
                                                midgard_vector_alu_src s2 =
                                                        vector_alu_from_unsigned(ains->alu.src2);

                                                could_scalar &= !s2.half;
                                        }
                                }

                                bool scalar = could_scalar && scalarable;

                                /* TODO: Check ahead-of-time for other scalar
                                 * hazards that otherwise get aborted out */

                                if (scalar)
                                        assert(units & UNITS_SCALAR);

                                if (!scalar) {
                                        if (last_unit >= UNIT_VADD) {
                                                if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        } else {
                                                if ((units & UNIT_VMUL) && last_unit < UNIT_VMUL)
                                                        unit = UNIT_VMUL;
                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
                                                        unit = UNIT_VADD;
                                                else if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        }
                                } else {
                                        if (last_unit >= UNIT_VADD) {
                                                if ((units & UNIT_SMUL) && !(control & UNIT_SMUL))
                                                        unit = UNIT_SMUL;
                                                else if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        } else {
                                                if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
                                                        unit = UNIT_SADD;
                                                else if (units & UNIT_SMUL)
                                                        unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
                                                        unit = UNIT_VADD;
                                                else
                                                        break;
                                        }
                                }

                                assert(unit & units);
                        }

                        /* Late unit check, this time for encoding (not parallelism) */
                        if (unit <= last_unit) break;
                        /* Clear the segment */
                        if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
                                segment_size = 0;

                        if (midgard_has_hazard(segment, segment_size, ains))
                                break;

                        /* We're good to go -- emit the instruction */
                        ains->unit = unit;

                        segment[segment_size++] = ains;
                        /* We try to reuse constants if possible, by adjusting
                         * the swizzle */

                        if (ains->has_blend_constant) {
                                /* Everything conflicts with the blend constant */
                                if (bundle.has_embedded_constants)
                                        break;

                                bundle.has_blend_constant = 1;
                                bundle.has_embedded_constants = 1;
                        } else if (ains->has_constants && ains->alu.reg_mode == midgard_reg_mode_16) {
                                /* TODO: DRY with the analysis pass */

                                if (bundle.has_blend_constant)
                                        break;

                                if (constant_count)
                                        break;

                                /* TODO: Fix packing XXX */
                                uint16_t *bundles = (uint16_t *) bundle.constants;
                                uint32_t *constants = (uint32_t *) ains->constants;

                                /* Copy them wholesale */
                                for (unsigned i = 0; i < 4; ++i)
                                        bundles[i] = constants[i];

                                bundle.has_embedded_constants = true;
                                constant_count = 4;
                        } else if (ains->has_constants) {
                                /* By definition, blend constants conflict with
                                 * everything, so if there are already
                                 * constants we break the bundle *now* */

                                if (bundle.has_blend_constant)
                                        break;

                                /* For anything but blend constants, we can do
                                 * proper analysis, however */

                                /* TODO: Mask by which are used */
                                uint32_t *constants = (uint32_t *) ains->constants;
                                uint32_t *bundles = (uint32_t *) bundle.constants;

                                uint32_t indices[4] = { 0 };
                                bool break_bundle = false;

                                for (unsigned i = 0; i < 4; ++i) {
                                        uint32_t cons = constants[i];
                                        bool constant_found = false;

                                        /* Search for the constant */
                                        for (unsigned j = 0; j < constant_count; ++j) {
                                                if (bundles[j] != cons)
                                                        continue;

                                                /* We found it, reuse */
                                                indices[i] = j;
                                                constant_found = true;
                                                break;
                                        }

                                        if (constant_found)
                                                continue;

                                        /* We didn't find it, so allocate it */
                                        unsigned idx = constant_count++;

                                        if (idx >= 4) {
                                                /* Uh-oh, out of space */
                                                break_bundle = true;
                                                break;
                                        }

                                        /* We have space, copy it in! */
                                        bundles[idx] = cons;
                                        indices[i] = idx;
                                }

                                if (break_bundle)
                                        break;

                                /* Cool, we have it in. So use indices as a
                                 * swizzle */

                                unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
                                unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);

                                if (ains->ssa_args.src0 == r_constant)
                                        ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);

                                if (ains->ssa_args.src1 == r_constant)
                                        ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);

                                bundle.has_embedded_constants = true;
                        }
                        if (ains->unit & UNITS_ANY_VECTOR) {
                                bytes_emitted += sizeof(midgard_reg_info);
                                bytes_emitted += sizeof(midgard_vector_alu);
                        } else if (ains->compact_branch) {
                                /* All of r0 has to be written out along with
                                 * the branch writeout */

                                if (ains->writeout) {
                                        /* The rules for when "bare" writeout
                                         * is safe are when all components of
                                         * r0 are written out in the final
                                         * bundle, earlier than VLUT, where any
                                         * register dependencies of r0 are from
                                         * an earlier bundle. We can't verify
                                         * this before RA, so we don't try. */

                                        /* Inject a move to write out r0 */
                                        midgard_instruction ins = v_mov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
                                        ins.unit = UNIT_VMUL;

                                        /* TODO don't leak */
                                        midgard_instruction *move =
                                                mem_dup(&ins, sizeof(midgard_instruction));
                                        bytes_emitted += sizeof(midgard_reg_info);
                                        bytes_emitted += sizeof(midgard_vector_alu);
                                        bundle.instructions[packed_idx++] = move;
                                }

                                if (ains->unit == ALU_ENAB_BRANCH) {
                                        bytes_emitted += sizeof(midgard_branch_extended);
                                } else {
                                        bytes_emitted += sizeof(ains->br_compact);
                                }
                        } else {
                                bytes_emitted += sizeof(midgard_reg_info);
                                bytes_emitted += sizeof(midgard_scalar_alu);
                        }

                        /* Defer marking until after writing to allow for break */
                        control |= ains->unit;
                        last_unit = ains->unit;
                        ++instructions_emitted;
                        ++index;
                }

                int padding = 0;
                /* Pad ALU op to nearest word */

                if (bytes_emitted & 15) {
                        padding = 16 - (bytes_emitted & 15);
                        bytes_emitted += padding;
                }

                /* Constants must always be quadwords */
                if (bundle.has_embedded_constants)
                        bytes_emitted += 16;

                /* Size ALU instruction for tag */
                bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
                bundle.padding = padding;
                bundle.control = bundle.tag | control;
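                /* Worked example (a sketch, assuming the ALU tags are
                 * consecutive): 20 bytes of packed ALU words pad up to 32,
                 * i.e. two quadwords, so the tag becomes TAG_ALU_4 + 1. */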
                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Load store instructions have two words at once. If
                 * we only have one queued up, we need to NOP pad.
                 * Otherwise, we store both in succession to save space
                 * and cycles -- letting them go in parallel -- skip
                 * the next. The usefulness of this optimisation is
                 * greatly dependent on the quality of the instruction
                 * scheduler. */

                midgard_instruction *next_op = mir_next_op(ins);

                if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) {
                        /* TODO: Concurrency check */
                        instructions_emitted++;
                }

                break;
        }
        case TAG_TEXTURE_4: {
                /* Which tag we use depends on the shader stage */
                bool in_frag = ctx->stage == MESA_SHADER_FRAGMENT;
                bundle.tag = in_frag ? TAG_TEXTURE_4 : TAG_TEXTURE_4_VTX;
                break;
        }

        default:
                unreachable("Unknown tag");
                break;
        }
        /* Copy the instructions into the bundle */
        bundle.instruction_count = instructions_emitted + 1 + packed_idx;

        midgard_instruction *uins = ins;
        for (; packed_idx < bundle.instruction_count; ++packed_idx) {
                bundle.instructions[packed_idx] = uins;
                uins = mir_next_op(uins);
        }

        /* Tell the caller how many extra instructions it should skip */
        *skip = instructions_emitted;

        return bundle;
}
/* Schedule a single block by iterating its instructions to create bundles.
 * While we go, tally up the bundle sizes to compute the block size. */

static void
schedule_block(compiler_context *ctx, midgard_block *block)
{
        util_dynarray_init(&block->bundles, NULL);

        block->quadword_count = 0;

        mir_foreach_instr_in_block(block, ins) {
                int skip = 0;
                midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip);
                util_dynarray_append(&block->bundles, midgard_bundle, bundle);

                if (bundle.has_blend_constant) {
                        /* TODO: Multiblock? */
                        int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1;
                        ctx->blend_constant_offset = quadwords_within_block * 0x10;
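                        /* E.g. a one-quadword bundle emitted after two earlier
                         * quadwords gives quadwords_within_block = 2, so the
                         * blend constant sits at byte offset 0x20 into the
                         * block. */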
                }

                /* Skip over the instructions that were folded into this bundle */
                while (skip--)
                        ins = mir_next_op(ins);

                block->quadword_count += quadword_size(bundle.tag);
        }

        block->is_scheduled = true;
}
/* The following passes reorder MIR instructions to enable better scheduling */

static void
midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
{
        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_LOAD_STORE_4) continue;

                /* We've found a load/store op. Check if next is also load/store. */
                midgard_instruction *next_op = mir_next_op(ins);
                if (&next_op->link != &block->instructions) {
                        if (next_op->type == TAG_LOAD_STORE_4) {
                                /* If so, we're done since we're a pair */
                                ins = mir_next_op(ins);
                                continue;
                        }
                }
                /* Maximum search distance to pair, to avoid register pressure disasters */
                int search_distance = 8;

                /* Otherwise, we have an orphaned load/store -- search for another load */
                mir_foreach_instr_in_block_from(block, c, mir_next_op(ins)) {
                        /* Terminate search if necessary */
                        if (!(search_distance--)) break;

                        if (c->type != TAG_LOAD_STORE_4) continue;

                        /* Stores cannot be reordered, since they have
                         * dependencies. For the same reason, indirect
                         * loads cannot be reordered as their index is
                         * loaded from a register */

                        if (OP_IS_STORE(c->load_store.op)) continue;

                        /* It appears the 0x800 bit is set whenever a
                         * load is direct, unset when it is indirect.
                         * Skip indirect loads. */

                        if (!(c->load_store.unknown & 0x800)) continue;

                        /* We found one! Move it up to pair and remove it from the old location */

                        mir_insert_instruction_before(ins, *c);
                        mir_remove_instruction(c);

                        break;
                }
        }
}
/* When we're 'squeezing down' the values in the IR, we maintain a hash
 * from the original index to the squeezed temporary */

static unsigned
find_or_allocate_temp(compiler_context *ctx, unsigned hash)
{
        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
                return hash;

        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
                                ctx->hash_to_temp, hash + 1);

        if (temp)
                return temp - 1;

        /* If no temp is found, allocate one */
        temp = ctx->temp_count++;
        ctx->max_hash = MAX2(ctx->max_hash, hash);

        _mesa_hash_table_u64_insert(ctx->hash_to_temp,
                                    hash + 1, (void *) ((uintptr_t) temp + 1));

        return temp;
}
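/* For instance, SSA indices 3, 9, 9, 14 encountered in order squeeze down to
 * the dense temporaries 0, 1, 1, 2, while fixed indices at or above
 * SSA_FIXED_MINIMUM pass through untouched. */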
/* Reassigns numbering to get rid of gaps in the indices */

static void
mir_squeeze_index(compiler_context *ctx)
{
        /* Reset tracking so temporaries are handed out from zero again */
        ctx->temp_count = 0;
        /* TODO don't leak old hash_to_temp */
        ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
                ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0);

                if (!ins->ssa_args.inline_constant)
                        ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
        }
}
static midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .ssa_args = {
                        .dest = -1,
                        .src0 = -1,
                        .src1 = -1
                },
                .load_store = {
                        .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
                        .swizzle = SWIZZLE_XYZW,

                        /* For register spilling - to thread local storage */
                        .unknown = 0x1EEA,

                        /* Splattered across, TODO combine logically */
                        .varying_parameters = (byte & 0x1FF) << 1,
                        .address = (byte >> 9)
                }
        };

        if (is_store) {
                /* r0 = r26, r1 = r27 */
                assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
                ins.ssa_args.src0 = (srcdest == SSA_FIXED_REGISTER(27)) ? SSA_FIXED_REGISTER(1) : SSA_FIXED_REGISTER(0);
        } else {
                ins.ssa_args.dest = srcdest;
        }

        return ins;
}
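/* A worked example of the split encoding above: spill slot index 2 addresses
 * byte 2 * 16 = 32, giving .varying_parameters = (32 & 0x1FF) << 1 = 64 and
 * .address = 32 >> 9 = 0. */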
void
schedule_program(compiler_context *ctx)
{
        struct ra_graph *g = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        midgard_promote_uniforms(ctx, 8);

        mir_foreach_block(ctx, block) {
                midgard_pair_load_store(ctx, block);
        }
        do {
                /* If we spill, find the best spill node and spill it */

                unsigned spill_index = ctx->temp_count;

                if (g && spilled) {
                        /* All nodes are equal in spill cost, but we can't
                         * spill nodes written to from an unspill */

                        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                                ra_set_node_spill_cost(g, i, 1.0);
                        }

                        mir_foreach_instr_global(ctx, ins) {
                                if (ins->type != TAG_LOAD_STORE_4) continue;
                                if (ins->load_store.op != midgard_op_ld_int4) continue;
                                if (ins->load_store.unknown != 0x1EEA) continue;
                                ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
                        }

                        int spill_node = ra_get_best_spill_node(g);

                        if (spill_node < 0) {
                                mir_print_shader(ctx);
                                assert(0);
                        }

                        /* Allocate TLS slot */
                        unsigned spill_slot = spill_count++;

                        /* Replace all stores to the spilled node with stores
                         * to the spill register (r26), each followed by a
                         * store out to thread local storage */

                        mir_foreach_instr_global_safe(ctx, ins) {
                                if (ins->compact_branch) continue;
                                if (ins->ssa_args.dest != spill_node) continue;
                                ins->ssa_args.dest = SSA_FIXED_REGISTER(26);

                                midgard_instruction st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
                                mir_insert_instruction_before(mir_next_op(ins), st);
                        }
                        /* Insert a load from TLS before the first consecutive
                         * use of the node, rewriting to use spilled indices to
                         * break up the live range */

                        mir_foreach_block(ctx, block) {
                                bool consecutive_skip = false;
                                unsigned consecutive_index = 0;

                                mir_foreach_instr_in_block(block, ins) {
                                        if (ins->compact_branch) continue;

                                        if (!mir_has_arg(ins, spill_node)) {
                                                consecutive_skip = false;
                                                continue;
                                        }

                                        if (consecutive_skip) {
                                                /* Rewrite to the index we already loaded */
                                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
                                                continue;
                                        }

                                        consecutive_index = ++spill_index;

                                        midgard_instruction st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
                                        midgard_instruction *before = ins;

                                        /* For a csel, go back one more not to break up the bundle */
                                        if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
                                                before = mir_prev_op(before);

                                        mir_insert_instruction_before(before, st);
                                        // consecutive_skip = true;

                                        mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
                                }
                        }
                }
                mir_squeeze_index(ctx);

                g = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        /* We would like to run RA after scheduling, but spilling can
         * invalidate the schedule, so RA runs first and scheduling follows */

        mir_foreach_block(ctx, block) {
                schedule_block(ctx, block);
        }

        /* Pipeline registers creation is a prepass before RA */
        mir_create_pipeline_registers(ctx);

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, g);
}