/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_memory.h"

/* Create a mask of accessed components from a swizzle to figure out vector
 * dependencies */

static unsigned
swizzle_to_access_mask(unsigned swizzle)
{
        unsigned component_mask = 0;

        for (int i = 0; i < 4; ++i) {
                unsigned c = (swizzle >> (2 * i)) & 3;
                component_mask |= (1 << c);
        }

        return component_mask;
}
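
/* For example, the identity swizzle .xyzw (0xE4) accesses all four
 * components (mask 0xF), while a broadcast swizzle .xxxx (0x00) only
 * touches x (mask 0x1) */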

/* Does the mask cover more than a scalar? */

static bool
is_single_component_mask(unsigned mask)
{
        int components = 0;

        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        components++;
        }

        return components == 1;
}
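
/* For example, a mask of 0x4 (.z only) is a single component, whereas 0x3
 * (.xy) is not and has to stay on a vector unit */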

/* Checks for an SSA data hazard between two adjacent instructions, keeping in
 * mind that we are a vector architecture and we can write to different
 * components simultaneously */

static bool
can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
{
        /* Each instruction reads some registers and writes to a register. See
         * where the first writes */

        /* Figure out where exactly we wrote to */
        int source = first->ssa_args.dest;
        int source_mask = first->mask;

        /* As long as the second doesn't read from the first, we're okay */
        if (second->ssa_args.src0 == source) {
                if (first->type == TAG_ALU_4) {
                        /* Figure out which components we just read from */

                        int q = second->alu.src1;
                        midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;

                        /* Check if there are components in common, and fail if so */
                        if (swizzle_to_access_mask(m->swizzle) & source_mask)
                                return false;
                } else
                        return false;
        }

        if (second->ssa_args.src1 == source)
                return false;

        /* Otherwise, it's safe in that regard. Another data hazard is both
         * writing to the same place, of course */

        if (second->ssa_args.dest == source) {
                /* ...but only if the components overlap */

                if (second->mask & source_mask)
                        return false;
        }

        /* Otherwise, no hazard */
        return true;
}
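
/* Illustrating the rule above: if `first` writes only .x (mask 0x1) and
 * `second` reads .yz of that value (access mask 0x6), the masks don't
 * overlap and the two may still share a bundle; touching .x as well would
 * force them apart. */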

/* Does the given instruction conflict with anything already scheduled into
 * the current segment? */

static bool
midgard_has_hazard(
        midgard_instruction **segment, unsigned segment_size,
        midgard_instruction *ains)
{
        for (int s = 0; s < segment_size; ++s)
                if (!can_run_concurrent_ssa(segment[s], ains))
                        return true;

        return false;
}

/* Schedules, but does not emit, a single bundle. After scheduling, the final
 * tag and size of the bundle are known, which are necessary for branching */

static midgard_bundle
schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip)
{
        int instructions_emitted = 0, packed_idx = 0;
        midgard_bundle bundle = { 0 };

        uint8_t tag = ins->type;

        /* Default to the instruction's tag */
        bundle.tag = tag;

        switch (ins->type) {
        case TAG_ALU_4: {
                uint32_t control = 0;
                size_t bytes_emitted = sizeof(control);

                /* TODO: Constant combining */
                int index = 0, last_unit = 0;

                /* Previous instructions, for the purpose of parallelism */
                midgard_instruction *segment[4] = {0};
                int segment_size = 0;

                instructions_emitted = -1;
                midgard_instruction *pins = ins;

                unsigned constant_count = 0;

                while (1) {
                        midgard_instruction *ains = pins;

                        /* Advance instruction pointer */
                        if (index) {
                                ains = mir_next_op(pins);
                                pins = ains;
                        }

                        /* Out-of-work condition */
                        if ((struct list_head *) ains == &block->instructions)
                                break;

                        /* Ensure that the chain can continue */
                        if (ains->type != TAG_ALU_4) break;

                        /* If there's already something in the bundle and we
                         * have weird scheduler constraints, break now */
                        if (ains->precede_break && index) break;

                        /* According to the presentation "The ARM
                         * Mali-T880 Mobile GPU" from HotChips 27,
                         * there are two pipeline stages. Branching
                         * position determined experimentally. Lines
                         * are executed in parallel:
                         *
                         * [ VMUL ] [ SADD ]
                         * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ]
                         *
                         * Verify that there are no ordering dependencies here.
                         *
                         * TODO: Allow for parallelism!!! */
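
                        /* The selection below relies on the UNIT_* enable
                         * bits being ordered by pipeline position (VMUL,
                         * SADD, VADD, SMUL, LUT, branch), so a bundle is
                         * filled strictly "downward": e.g. once a VADD has
                         * been placed, only SMUL/LUT/branch can still join,
                         * and the late `unit <= last_unit` check starts a
                         * new bundle otherwise. */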

                        /* Pick a unit for it if it doesn't force a particular unit */

                        int unit = ains->unit;

                        if (!unit) {
                                int op = ains->alu.op;
                                int units = alu_opcode_props[op].props;

                                bool scalarable = units & UNITS_SCALAR;
                                bool could_scalar = is_single_component_mask(ains->mask);

                                /* Only 16/32-bit can run on a scalar unit */
                                could_scalar &= ains->alu.reg_mode != midgard_reg_mode_8;
                                could_scalar &= ains->alu.reg_mode != midgard_reg_mode_64;
                                could_scalar &= ains->alu.dest_override == midgard_dest_override_none;

                                if (ains->alu.reg_mode == midgard_reg_mode_16) {
                                        /* If we're running in 16-bit mode, we
                                         * can't have any 8-bit sources on the
                                         * scalar unit (since the scalar unit
                                         * doesn't understand 8-bit) */

                                        midgard_vector_alu_src s1 =
                                                vector_alu_from_unsigned(ains->alu.src1);

                                        could_scalar &= !s1.half;

                                        if (!ains->ssa_args.inline_constant) {
                                                midgard_vector_alu_src s2 =
                                                        vector_alu_from_unsigned(ains->alu.src2);

                                                could_scalar &= !s2.half;
                                        }
                                }

                                bool scalar = could_scalar && scalarable;

                                /* TODO: Check ahead-of-time for other scalar
                                 * hazards that otherwise get aborted out */

                                if (scalar)
                                        assert(units & UNITS_SCALAR);

                                if (!scalar) {
                                        if (last_unit >= UNIT_VADD) {
                                                if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        } else {
                                                if ((units & UNIT_VMUL) && last_unit < UNIT_VMUL)
                                                        unit = UNIT_VMUL;
                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
                                                        unit = UNIT_VADD;
                                                else if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        }
                                } else {
                                        if (last_unit >= UNIT_VADD) {
                                                if ((units & UNIT_SMUL) && !(control & UNIT_SMUL))
                                                        unit = UNIT_SMUL;
                                                else if (units & UNIT_VLUT)
                                                        unit = UNIT_VLUT;
                                                else
                                                        break;
                                        } else {
                                                if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
                                                        unit = UNIT_SADD;
                                                else if (units & UNIT_SMUL)
                                                        unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
                                                        unit = UNIT_VADD;
                                                else
                                                        break;
                                        }
                                }

                                assert(unit & units);
                        }

                        /* Late unit check, this time for encoding (not parallelism) */
                        if (unit <= last_unit) break;

                        /* Clear the segment */
                        if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
                                segment_size = 0;

                        if (midgard_has_hazard(segment, segment_size, ains))
                                break;

                        /* We're good to go -- emit the instruction */
                        ains->unit = unit;
                        segment[segment_size++] = ains;

                        /* We try to reuse constants if possible, by adjusting
                         * the swizzle */

                        if (ains->has_blend_constant) {
                                /* Everything conflicts with the blend constant */
                                if (bundle.has_embedded_constants)
                                        break;

                                bundle.has_blend_constant = 1;
                                bundle.has_embedded_constants = 1;
                        } else if (ains->has_constants && ains->alu.reg_mode == midgard_reg_mode_16) {
                                /* TODO: DRY with the analysis pass */

                                if (bundle.has_blend_constant)
                                        break;

                                if (constant_count)
                                        break;

                                /* TODO: Fix packing XXX */
                                uint16_t *bundles = (uint16_t *) bundle.constants;
                                uint32_t *constants = (uint32_t *) ains->constants;

                                /* Copy them wholesale */
                                for (unsigned i = 0; i < 4; ++i)
                                        bundles[i] = constants[i];

                                bundle.has_embedded_constants = true;
                                constant_count = 4;
                        } else if (ains->has_constants) {
                                /* By definition, blend constants conflict with
                                 * everything, so if there are already
                                 * constants we break the bundle *now* */

                                if (bundle.has_blend_constant)
                                        break;

                                /* For anything but blend constants, we can do
                                 * proper analysis, however */

                                /* TODO: Mask by which are used */
                                uint32_t *constants = (uint32_t *) ains->constants;
                                uint32_t *bundles = (uint32_t *) bundle.constants;

                                uint32_t indices[4] = { 0 };
                                bool break_bundle = false;

                                for (unsigned i = 0; i < 4; ++i) {
                                        uint32_t cons = constants[i];
                                        bool constant_found = false;

                                        /* Search for the constant */
                                        for (unsigned j = 0; j < constant_count; ++j) {
                                                if (bundles[j] != cons)
                                                        continue;

                                                /* We found it, reuse */
                                                indices[i] = j;
                                                constant_found = true;
                                                break;
                                        }

                                        if (constant_found)
                                                continue;

                                        /* We didn't find it, so allocate it */
                                        unsigned idx = constant_count++;

                                        if (idx >= 4) {
                                                /* Uh-oh, out of space */
                                                break_bundle = true;
                                                break;
                                        }

                                        /* We have space, copy it in! */
                                        bundles[idx] = cons;
                                        indices[i] = idx;
                                }

                                if (break_bundle)
                                        break;

                                /* Cool, we have it in. So use indices as a
                                 * swizzle */

                                unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
                                unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);

                                if (ains->ssa_args.src0 == r_constant)
                                        ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);

                                if (ains->ssa_args.src1 == r_constant)
                                        ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);

                                bundle.has_embedded_constants = true;
                        }
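
                        /* For example, an op needing { 1.0, 2.0, 1.0, 0.5 }
                         * when the bundle already embeds { 1.0, 2.0 } only
                         * appends 0.5; the indices become { 0, 1, 0, 2 } and
                         * the constant source is swizzled through them rather
                         * than breaking the bundle. */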

                        if (ains->unit & UNITS_ANY_VECTOR) {
                                bytes_emitted += sizeof(midgard_reg_info);
                                bytes_emitted += sizeof(midgard_vector_alu);
                        } else if (ains->compact_branch) {
                                /* All of r0 has to be written out along with
                                 * the branch writeout */

                                if (ains->writeout) {
                                        /* The rules for when "bare" writeout
                                         * is safe are when all components of
                                         * r0 are written out in the final
                                         * bundle, earlier than VLUT, where any
                                         * register dependencies of r0 are from
                                         * an earlier bundle. We can't verify
                                         * this before RA, so we don't try. */

                                        if (index != 0)
                                                break;

                                        /* Inject a move */
                                        midgard_instruction ins = v_mov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
                                        ins.unit = UNIT_VMUL;
                                        control |= ins.unit;

                                        /* TODO don't leak */
                                        midgard_instruction *move =
                                                mem_dup(&ins, sizeof(midgard_instruction));
                                        bytes_emitted += sizeof(midgard_reg_info);
                                        bytes_emitted += sizeof(midgard_vector_alu);
                                        bundle.instructions[packed_idx++] = move;
                                }

                                if (ains->unit == ALU_ENAB_BRANCH) {
                                        bytes_emitted += sizeof(midgard_branch_extended);
                                } else {
                                        bytes_emitted += sizeof(ains->br_compact);
                                }
                        } else {
                                bytes_emitted += sizeof(midgard_reg_info);
                                bytes_emitted += sizeof(midgard_scalar_alu);
                        }

                        /* Defer marking until after writing to allow for break */
                        control |= ains->unit;
                        last_unit = ains->unit;
                        ++instructions_emitted;
                        ++index;
                }

                int padding = 0;

                /* Pad ALU op to nearest quadword */

                if (bytes_emitted & 15) {
                        padding = 16 - (bytes_emitted & 15);
                        bytes_emitted += padding;
                }

                /* Constants must always be quadwords */
                if (bundle.has_embedded_constants)
                        bytes_emitted += 16;

                /* Size ALU instruction for tag */
                bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
                bundle.padding = padding;
                bundle.control = bundle.tag | control;
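
                /* For example, 40 bytes of ALU words pad out to 48 (three
                 * quadwords), giving TAG_ALU_4 + 2, i.e. TAG_ALU_12 --
                 * assuming the consecutive TAG_ALU_* encoding this
                 * arithmetic already relies on */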

                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Load/store instructions are bundled two words at a time. If
                 * we only have one queued up, we need to NOP pad.
                 * Otherwise, we store both in succession to save space
                 * and cycles -- letting them go in parallel -- and skip
                 * the next. The usefulness of this optimisation is
                 * greatly dependent on the quality of the instruction
                 * scheduler */

                midgard_instruction *next_op = mir_next_op(ins);

                if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) {
                        /* TODO: Concurrency check */
                        instructions_emitted++;
                }
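
                /* Bumping instructions_emitted folds the neighbouring
                 * load/store op into this bundle; the caller learns about
                 * it through *skip and advances past it */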

                break;
        }

        case TAG_TEXTURE_4: {
                /* Which tag we use depends on the shader stage */
                bool in_frag = ctx->stage == MESA_SHADER_FRAGMENT;
                bundle.tag = in_frag ? TAG_TEXTURE_4 : TAG_TEXTURE_4_VTX;
                break;
        }

        default:
                unreachable("Unknown tag");
        }

        /* Copy the instructions into the bundle */
        bundle.instruction_count = instructions_emitted + 1 + packed_idx;

        midgard_instruction *uins = ins;
        for (; packed_idx < bundle.instruction_count; ++packed_idx) {
                bundle.instructions[packed_idx] = uins;
                uins = mir_next_op(uins);
        }

        *skip = instructions_emitted;

        return bundle;
}

/* Schedule a single block by iterating its instructions to create bundles.
 * As we go, tally the bundle sizes to compute the block size. */

static void
schedule_block(compiler_context *ctx, midgard_block *block)
{
        util_dynarray_init(&block->bundles, NULL);

        block->quadword_count = 0;

        mir_foreach_instr_in_block(block, ins) {
                int skip = 0;
                midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip);
                util_dynarray_append(&block->bundles, midgard_bundle, bundle);

                if (bundle.has_blend_constant) {
                        /* TODO: Multiblock? */
                        int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1;
                        ctx->blend_constant_offset = quadwords_within_block * 0x10;
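
                        /* quadword_size() counts 16-byte quadwords, so the
                         * 0x10 multiply turns the index of this bundle's
                         * final quadword -- presumably where the embedded
                         * blend constant ends up -- into a byte offset that
                         * can be patched later */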
                }

                while(skip--)
                        ins = mir_next_op(ins);

                block->quadword_count += quadword_size(bundle.tag);
        }

        block->is_scheduled = true;
}

void
schedule_program(compiler_context *ctx)
{
        struct ra_graph *g = NULL;
        bool spilled = false;
        int iter_count = 10; /* max iterations */

        do {
                /* We would like to run RA after scheduling, but spilling can
                 * invalidate the schedule, so we reschedule before every
                 * allocation attempt */

                mir_foreach_block(ctx, block) {
                        schedule_block(ctx, block);
                }

                /* Pipeline registers creation is a prepass before RA */
                mir_create_pipeline_registers(ctx);

                g = allocate_registers(ctx, &spilled);
        } while(spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        install_registers(ctx, g);
}