pan/midgard: Allocate `dependencies` on stack
[mesa.git] / src / panfrost / midgard / midgard_schedule.c
1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "midgard_ops.h"
26 #include "util/u_memory.h"
27 #include "util/register_allocate.h"
28
29 /* Create a mask of accessed components from a swizzle to figure out vector
30 * dependencies */
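/* For example, a swizzle of .xxxx (all fields zero) yields a mask of 0x1,
 * while .xyzw yields 0xF */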
31
32 static unsigned
33 swizzle_to_access_mask(unsigned swizzle)
34 {
35 unsigned component_mask = 0;
36
37 for (int i = 0; i < 4; ++i) {
38 unsigned c = (swizzle >> (2 * i)) & 3;
39 component_mask |= (1 << c);
40 }
41
42 return component_mask;
43 }
44
45 /* Does the mask cover exactly one component, i.e. is the op scalar? */
46
47 static bool
48 is_single_component_mask(unsigned mask)
49 {
50 int components = 0;
51
52 for (int c = 0; c < 8; ++c) {
53 if (mask & (1 << c))
54 components++;
55 }
56
57 return components == 1;
58 }
59
60 /* Checks for an SSA data hazard between two adjacent instructions, keeping in
61 * mind that we are a vector architecture and we can write to different
62 * components simultaneously */
63
64 static bool
65 can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
66 {
67 /* Each instruction reads some registers and writes to a register. See
68 * where the first writes */
69
70 /* Figure out where exactly we wrote to */
71 int source = first->ssa_args.dest;
72 int source_mask = first->mask;
73
74 /* As long as the second doesn't read from the first, we're okay */
75 for (unsigned i = 0; i < ARRAY_SIZE(second->ssa_args.src); ++i) {
76 if (second->ssa_args.src[i] != source)
77 continue;
78
79 if (first->type != TAG_ALU_4)
80 return false;
81
82 /* Figure out which components we just read from */
83
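/* The packed source bits are reinterpreted as a midgard_vector_alu_src
 * so the swizzle field can be inspected */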
84 int q = (i == 0) ? second->alu.src1 : second->alu.src2;
85 midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
86
87 /* Check if there are components in common, and fail if so */
88 if (swizzle_to_access_mask(m->swizzle) & source_mask)
89 return false;
90 }
91
92 /* Otherwise, it's safe in that regard. Another data hazard is both
93 * writing to the same place, of course */
94
95 if (second->ssa_args.dest == source) {
96 /* ...but only if the components overlap */
97
98 if (second->mask & source_mask)
99 return false;
100 }
101
102 /* ...That's it */
103 return true;
104 }
105
106 static bool
107 midgard_has_hazard(
108 midgard_instruction **segment, unsigned segment_size,
109 midgard_instruction *ains)
110 {
111 for (int s = 0; s < segment_size; ++s)
112 if (!can_run_concurrent_ssa(segment[s], ains))
113 return true;
114
115 return false;
118 }
119
120 /* Fragment writeout (of r0) is allowed when:
121 *
122 * - All components of r0 are written in the bundle
123 * - No components of r0 are written in VLUT
124 * - Non-pipelined dependencies of r0 are not written in the bundle
125 *
126 * This function checks if these requirements are satisfied given the content
127 * of a scheduled bundle.
128 */
129
130 static bool
131 can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsigned count, unsigned node_count)
132 {
133 /* First scan for which components of r0 are written out. Initially
134 * none are written */
135
136 uint8_t r0_written_mask = 0x0;
137
138 /* Simultaneously we scan for the set of dependencies */
139
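/* The dependency set is sized by node_count and only lives for the
 * duration of this check, so it can live on the stack */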
140 size_t sz = sizeof(BITSET_WORD) * BITSET_WORDS(node_count);
141 BITSET_WORD *dependencies = alloca(sz);
142 memset(dependencies, 0, sz);
143
144 for (unsigned i = 0; i < count; ++i) {
145 midgard_instruction *ins = bundle[i];
146
147 if (ins->ssa_args.dest != SSA_FIXED_REGISTER(0))
148 continue;
149
150 /* Record written out mask */
151 r0_written_mask |= ins->mask;
152
153 /* Record dependencies, but only if they won't become pipeline
154 * registers. We know the value can't be live after this point,
155 * because writeout happens at the very end of the shader, so
156 * check whether they were written before us. */
157
158 unsigned src0 = ins->ssa_args.src[0];
159 unsigned src1 = ins->ssa_args.src[1];
160
161 if (!mir_is_written_before(ctx, bundle[0], src0))
162 src0 = -1;
163
164 if (!mir_is_written_before(ctx, bundle[0], src1))
165 src1 = -1;
166
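/* A source of -1 wraps around as an unsigned value and therefore
 * fails the bounds checks below */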
167 if ((src0 > 0) && (src0 < node_count))
168 BITSET_SET(dependencies, src0);
169
170 if ((src1 > 0) && (src1 < node_count))
171 BITSET_SET(dependencies, src1);
172
173 /* Requirement 2 */
174 if (ins->unit == UNIT_VLUT)
175 return false;
176 }
177
178 /* Requirement 1 */
179 if ((r0_written_mask & 0xF) != 0xF)
180 return false;
181
182 /* Requirement 3 */
183
184 for (unsigned i = 0; i < count; ++i) {
185 unsigned dest = bundle[i]->ssa_args.dest;
186
187 if (dest < node_count && BITSET_TEST(dependencies, dest))
188 return false;
189 }
190
191 /* Otherwise, we're good to go */
192 return true;
193 }
194
195 /* Schedules, but does not emit, a single bundle of instructions. After
196 * scheduling, the final tag and size of the bundle are known, which are
197 * necessary for branching. */
198
199 static midgard_bundle
200 schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip)
201 {
202 int instructions_emitted = 0, packed_idx = 0;
203 midgard_bundle bundle = { 0 };
204
205 midgard_instruction *scheduled[5] = { NULL };
206
207 uint8_t tag = ins->type;
208
209 /* Default to the instruction's tag */
210 bundle.tag = tag;
211
212 switch (ins->type) {
213 case TAG_ALU_4: {
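/* Bitmask of the execution units used so far in this bundle; it also
 * forms part of the ALU control word emitted below */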
214 uint32_t control = 0;
215 size_t bytes_emitted = sizeof(control);
216
217 /* TODO: Constant combining */
218 int index = 0, last_unit = 0;
219
220 /* Previous instructions, for the purpose of parallelism */
221 midgard_instruction *segment[4] = {0};
222 int segment_size = 0;
223
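/* Start at -1 so that, after the loop, this counts how many
 * instructions were scheduled beyond the first one */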
224 instructions_emitted = -1;
225 midgard_instruction *pins = ins;
226
227 unsigned constant_count = 0;
228
229 for (;;) {
230 midgard_instruction *ains = pins;
231
232 /* Advance instruction pointer */
233 if (index) {
234 ains = mir_next_op(pins);
235 pins = ains;
236 }
237
238 /* Out-of-work condition */
239 if ((struct list_head *) ains == &block->instructions)
240 break;
241
242 /* Ensure that the chain can continue */
243 if (ains->type != TAG_ALU_4) break;
244
245 /* If there's already something in the bundle and we
246 * have weird scheduler constraints, break now */
247 if (ains->precede_break && index) break;
248
249 /* According to the presentation "The ARM
250 * Mali-T880 Mobile GPU" from HotChips 27,
251 * there are two pipeline stages. Branching
252 * position determined experimentally. Lines
253 * are executed in parallel:
254 *
255 * [ VMUL ] [ SADD ]
256 * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ]
257 *
258 * Verify that there are no ordering dependencies here.
259 *
260 * TODO: Allow for parallelism!!!
261 */
262
263 /* Pick a unit for it if it doesn't force a particular unit */
264
265 int unit = ains->unit;
266
267 if (!unit) {
268 int op = ains->alu.op;
269 int units = alu_opcode_props[op].props;
270
271 bool scalarable = units & UNITS_SCALAR;
272 bool could_scalar = is_single_component_mask(ains->mask);
273
274 /* Only 16/32-bit can run on a scalar unit */
275 could_scalar &= ains->alu.reg_mode != midgard_reg_mode_8;
276 could_scalar &= ains->alu.reg_mode != midgard_reg_mode_64;
277 could_scalar &= ains->alu.dest_override == midgard_dest_override_none;
278
279 if (ains->alu.reg_mode == midgard_reg_mode_16) {
280 /* If we're running in 16-bit mode, we
281 * can't have any 8-bit sources on the
282 * scalar unit (since the scalar unit
283 * doesn't understand 8-bit) */
284
285 midgard_vector_alu_src s1 =
286 vector_alu_from_unsigned(ains->alu.src1);
287
288 could_scalar &= !s1.half;
289
290 midgard_vector_alu_src s2 =
291 vector_alu_from_unsigned(ains->alu.src2);
292
293 could_scalar &= !s2.half;
294 }
295
296 bool scalar = could_scalar && scalarable;
297
298 /* TODO: Check ahead-of-time for other scalar
299 * hazards that otherwise get aborted out */
300
301 if (scalar)
302 assert(units & UNITS_SCALAR);
303
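/* Greedily pick the first suitable unit in pipeline order, skipping
 * units already taken in this bundle; if nothing fits, close the
 * bundle */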
304 if (!scalar) {
305 if (last_unit >= UNIT_VADD) {
306 if (units & UNIT_VLUT)
307 unit = UNIT_VLUT;
308 else
309 break;
310 } else {
311 if ((units & UNIT_VMUL) && last_unit < UNIT_VMUL)
312 unit = UNIT_VMUL;
313 else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
314 unit = UNIT_VADD;
315 else if (units & UNIT_VLUT)
316 unit = UNIT_VLUT;
317 else
318 break;
319 }
320 } else {
321 if (last_unit >= UNIT_VADD) {
322 if ((units & UNIT_SMUL) && !(control & UNIT_SMUL))
323 unit = UNIT_SMUL;
324 else if (units & UNIT_VLUT)
325 unit = UNIT_VLUT;
326 else
327 break;
328 } else {
329 if ((units & UNIT_VMUL) && (last_unit < UNIT_VMUL))
330 unit = UNIT_VMUL;
331 else if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
332 unit = UNIT_SADD;
333 else if (units & UNIT_VADD)
334 unit = UNIT_VADD;
335 else if (units & UNIT_SMUL)
336 unit = UNIT_SMUL;
337 else if (units & UNIT_VLUT)
338 unit = UNIT_VLUT;
339 else
340 break;
341 }
342 }
343
344 assert(unit & units);
345 }
346
347 /* Late unit check, this time for encoding (not parallelism) */
348 if (unit <= last_unit) break;
349
350 /* Clear the segment */
351 if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
352 segment_size = 0;
353
354 if (midgard_has_hazard(segment, segment_size, ains))
355 break;
356
357 /* We're good to go -- emit the instruction */
358 ains->unit = unit;
359
360 segment[segment_size++] = ains;
361
362 /* We try to reuse constants if possible, by adjusting
363 * the swizzle */
364
365 if (ains->has_blend_constant) {
366 /* Everything conflicts with the blend constant */
367 if (bundle.has_embedded_constants)
368 break;
369
370 bundle.has_blend_constant = 1;
371 bundle.has_embedded_constants = 1;
372 } else if (ains->has_constants && ains->alu.reg_mode == midgard_reg_mode_16) {
373 /* TODO: DRY with the analysis pass */
374
375 if (bundle.has_blend_constant)
376 break;
377
378 if (constant_count)
379 break;
380
381 /* TODO: Fix packing XXX */
382 uint16_t *bundles = (uint16_t *) bundle.constants;
383 uint32_t *constants = (uint32_t *) ains->constants;
384
385 /* Copy them wholesale */
386 for (unsigned i = 0; i < 4; ++i)
387 bundles[i] = constants[i];
388
389 bundle.has_embedded_constants = true;
390 constant_count = 4;
391 } else if (ains->has_constants) {
392 /* By definition, blend constants conflict with
393 * everything, so if there are already
394 * constants we break the bundle *now* */
395
396 if (bundle.has_blend_constant)
397 break;
398
399 /* For anything but blend constants, we can do
400 * proper analysis, however */
401
402 /* TODO: Mask by which are used */
403 uint32_t *constants = (uint32_t *) ains->constants;
404 uint32_t *bundles = (uint32_t *) bundle.constants;
405
406 uint32_t indices[4] = { 0 };
407 bool break_bundle = false;
408
409 for (unsigned i = 0; i < 4; ++i) {
410 uint32_t cons = constants[i];
411 bool constant_found = false;
412
413 /* Search for the constant */
414 for (unsigned j = 0; j < constant_count; ++j) {
415 if (bundles[j] != cons)
416 continue;
417
418 /* We found it, reuse */
419 indices[i] = j;
420 constant_found = true;
421 break;
422 }
423
424 if (constant_found)
425 continue;
426
427 /* We didn't find it, so allocate it */
428 unsigned idx = constant_count++;
429
430 if (idx >= 4) {
431 /* Uh-oh, out of space */
432 break_bundle = true;
433 break;
434 }
435
436 /* We have space, copy it in! */
437 bundles[idx] = cons;
438 indices[i] = idx;
439 }
440
441 if (break_bundle)
442 break;
443
444 /* Cool, we have it in. So use indices as a
445 * swizzle */
446
447 unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
448 unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
449
450 if (ains->ssa_args.src[0] == r_constant)
451 ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);
452
453 if (ains->ssa_args.src[1] == r_constant)
454 ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);
455
456 bundle.has_embedded_constants = true;
457 }
458
459 if (ains->unit & UNITS_ANY_VECTOR) {
460 bytes_emitted += sizeof(midgard_reg_info);
461 bytes_emitted += sizeof(midgard_vector_alu);
462 } else if (ains->compact_branch) {
463 /* All of r0 has to be written out along with
464 * the branch writeout */
465
466 if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, ctx->temp_count)) {
467 /* We only work on full moves
468 * at the beginning. We could
469 * probably do better */
470 if (index != 0)
471 break;
472
473 /* Inject a move */
474 midgard_instruction ins = v_mov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
475 ins.unit = UNIT_VMUL;
476 control |= ins.unit;
477
478 /* TODO don't leak */
479 midgard_instruction *move =
480 mem_dup(&ins, sizeof(midgard_instruction));
481 bytes_emitted += sizeof(midgard_reg_info);
482 bytes_emitted += sizeof(midgard_vector_alu);
483 bundle.instructions[packed_idx++] = move;
484 }
485
486 if (ains->unit == ALU_ENAB_BRANCH) {
487 bytes_emitted += sizeof(midgard_branch_extended);
488 } else {
489 bytes_emitted += sizeof(ains->br_compact);
490 }
491 } else {
492 bytes_emitted += sizeof(midgard_reg_info);
493 bytes_emitted += sizeof(midgard_scalar_alu);
494 }
495
496 /* Defer marking until after writing to allow for break */
497 scheduled[index] = ains;
498 control |= ains->unit;
499 last_unit = ains->unit;
500 ++instructions_emitted;
501 ++index;
502 }
503
504 int padding = 0;
505
506 /* Pad the ALU op to the nearest 16-byte word */
507
508 if (bytes_emitted & 15) {
509 padding = 16 - (bytes_emitted & 15);
510 bytes_emitted += padding;
511 }
512
513 /* Constants must always be quadwords */
514 if (bundle.has_embedded_constants)
515 bytes_emitted += 16;
516
517 /* Size ALU instruction for tag */
518 bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
519 bundle.padding = padding;
520 bundle.control = bundle.tag | control;
521
522 break;
523 }
524
525 case TAG_LOAD_STORE_4: {
526 /* Load/store instructions are issued two words at a time. If
527 * we only have one queued up, we need to NOP pad.
528 * Otherwise, we bundle both in succession to save space and
529 * cycles -- letting them go in parallel -- and skip the next
530 * instruction. The usefulness of this optimisation is
531 * greatly dependent on the quality of the instruction
532 * scheduler.
533 */
534
535 midgard_instruction *next_op = mir_next_op(ins);
536
537 if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) {
538 /* TODO: Concurrency check */
539 instructions_emitted++;
540 }
541
542 break;
543 }
544
545 case TAG_TEXTURE_4: {
546 /* Which tag we use depends on the shader stage */
547 bool in_frag = ctx->stage == MESA_SHADER_FRAGMENT;
548 bundle.tag = in_frag ? TAG_TEXTURE_4 : TAG_TEXTURE_4_VTX;
549 break;
550 }
551
552 default:
553 unreachable("Unknown tag");
554 break;
555 }
556
557 /* Copy the instructions into the bundle */
558 bundle.instruction_count = instructions_emitted + 1 + packed_idx;
559
560 midgard_instruction *uins = ins;
561 for (; packed_idx < bundle.instruction_count; ++packed_idx) {
562 bundle.instructions[packed_idx] = uins;
563 uins = mir_next_op(uins);
564 }
565
566 *skip = instructions_emitted;
567
568 return bundle;
569 }
570
571 /* Schedule a single block by iterating over its instructions to create bundles.
572 * As we go, tally up the bundle sizes to compute the block size. */
573
574 static void
575 schedule_block(compiler_context *ctx, midgard_block *block)
576 {
577 util_dynarray_init(&block->bundles, NULL);
578
579 block->quadword_count = 0;
580
581 mir_foreach_instr_in_block(block, ins) {
582 int skip;
583 midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip);
584 util_dynarray_append(&block->bundles, midgard_bundle, bundle);
585
586 if (bundle.has_blend_constant) {
587 /* TODO: Multiblock? */
588 int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1;
589 ctx->blend_constant_offset = quadwords_within_block * 0x10;
590 }
591
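/* Skip over the instructions that were folded into this bundle so
 * they are not scheduled twice */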
592 while(skip--)
593 ins = mir_next_op(ins);
594
595 block->quadword_count += quadword_size(bundle.tag);
596 }
597
598 block->is_scheduled = true;
599 }
600
601 /* The following passes reorder MIR instructions to enable better scheduling */
602
603 static void
604 midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
605 {
606 mir_foreach_instr_in_block_safe(block, ins) {
607 if (ins->type != TAG_LOAD_STORE_4) continue;
608
609 /* We've found a load/store op. Check if next is also load/store. */
610 midgard_instruction *next_op = mir_next_op(ins);
611 if (&next_op->link != &block->instructions) {
612 if (next_op->type == TAG_LOAD_STORE_4) {
613 /* If so, we're done since we're a pair */
614 ins = mir_next_op(ins);
615 continue;
616 }
617
618 /* Maximum search distance to pair, to avoid register pressure disasters */
619 int search_distance = 8;
620
621 /* Otherwise, we have an orphaned load/store -- search for another load */
622 mir_foreach_instr_in_block_from(block, c, mir_next_op(ins)) {
623 /* Terminate search if necessary */
624 if (!(search_distance--)) break;
625
626 if (c->type != TAG_LOAD_STORE_4) continue;
627
628 /* We can only reorder if there are no sources */
629
630 bool deps = false;
631
632 for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
633 deps |= (c->ssa_args.src[s] != -1);
634
635 if (deps)
636 continue;
637
638 /* We found one! Move it up to pair and remove it from the old location */
639
640 mir_insert_instruction_before(ins, *c);
641 mir_remove_instruction(c);
642
643 break;
644 }
645 }
646 }
647 }
648
649 /* When we're 'squeezing down' the values in the IR, we maintain a hash
650 * table mapping each original index to a compacted temporary index */
651
652 static unsigned
653 find_or_allocate_temp(compiler_context *ctx, unsigned hash)
654 {
655 if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
656 return hash;
657
658 unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
659 ctx->hash_to_temp, hash + 1);
660
661 if (temp)
662 return temp - 1;
663
664 /* If no temp is found, allocate one */
665 temp = ctx->temp_count++;
666 ctx->max_hash = MAX2(ctx->max_hash, hash);
667
668 _mesa_hash_table_u64_insert(ctx->hash_to_temp,
669 hash + 1, (void *) ((uintptr_t) temp + 1));
670
671 return temp;
672 }
673
674 /* Reassigns numbering to get rid of gaps in the indices */
675
676 static void
677 mir_squeeze_index(compiler_context *ctx)
678 {
679 /* Reset */
680 ctx->temp_count = 0;
681 /* TODO don't leak old hash_to_temp */
682 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
683
684 mir_foreach_instr_global(ctx, ins) {
685 ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
686
687 for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i)
688 ins->ssa_args.src[i] = find_or_allocate_temp(ctx, ins->ssa_args.src[i]);
689 }
690 }
691
692 static midgard_instruction
693 v_load_store_scratch(
694 unsigned srcdest,
695 unsigned index,
696 bool is_store,
697 unsigned mask)
698 {
699 /* We index by 32-bit vec4s */
700 unsigned byte = (index * 4 * 4);
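/* e.g. slot 0 covers bytes 0-15, slot 1 covers bytes 16-31, and so on */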
701
702 midgard_instruction ins = {
703 .type = TAG_LOAD_STORE_4,
704 .mask = mask,
705 .ssa_args = {
706 .dest = -1,
707 .src = { -1, -1, -1 },
708 },
709 .load_store = {
710 .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
711 .swizzle = SWIZZLE_XYZW,
712
713 /* For register spilling - to thread local storage */
714 .arg_1 = 0xEA,
715 .arg_2 = 0x1E,
716
717 /* Splattered across, TODO combine logically */
718 .varying_parameters = (byte & 0x1FF) << 1,
719 .address = (byte >> 9)
720 },
721
722 /* If we spill an unspill, RA goes into an infinite loop */
723 .no_spill = true
724 };
725
726 if (is_store) {
727 /* r0 = r26, r1 = r27 */
728 assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
729 ins.ssa_args.src[0] = srcdest;
730 } else {
731 ins.ssa_args.dest = srcdest;
732 }
733
734 return ins;
735 }
736
737 /* If register allocation fails, find the best spill node and spill it to fix
738 * whatever the issue was. This spill node could be a work register (spilling
739 * to thread local storage), but it could also simply be a special register
740 * that needs to spill to become a work register. */
741
742 static void mir_spill_register(
743 compiler_context *ctx,
744 struct ra_graph *g,
745 unsigned *spill_count)
746 {
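/* New temporaries introduced while spilling are numbered after the
 * existing ones */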
747 unsigned spill_index = ctx->temp_count;
748
749 /* Our first step is to calculate spill cost to figure out the best
750 * spill node. All nodes are equal in spill cost, but we can't spill
751 * nodes written to from an unspill */
752
753 for (unsigned i = 0; i < ctx->temp_count; ++i) {
754 ra_set_node_spill_cost(g, i, 1.0);
755 }
756
757 mir_foreach_instr_global(ctx, ins) {
758 if (ins->no_spill &&
759 ins->ssa_args.dest >= 0 &&
760 ins->ssa_args.dest < ctx->temp_count)
761 ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
762 }
763
764 int spill_node = ra_get_best_spill_node(g);
765
766 if (spill_node < 0) {
767 mir_print_shader(ctx);
768 assert(0);
769 }
770
771 /* We have a spill node, so check the class. Work registers
772 * legitimately spill to TLS, but special registers just spill to work
773 * registers */
774
775 unsigned class = ra_get_node_class(g, spill_node);
776 bool is_special = (class >> 2) != REG_CLASS_WORK;
777 bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
778
779 /* Allocate TLS slot (maybe) */
780 unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
781
782 /* For TLS, replace all stores to the spilled node. For
783 * special reads, just keep as-is; the class will be demoted
784 * implicitly. For special writes, spill to a work register */
785
786 if (!is_special || is_special_w) {
787 if (is_special_w)
788 spill_slot = spill_index++;
789
790 mir_foreach_instr_global_safe(ctx, ins) {
791 if (ins->ssa_args.dest != spill_node) continue;
792
793 midgard_instruction st;
794
795 if (is_special_w) {
796 st = v_mov(spill_node, blank_alu_src, spill_slot);
797 st.no_spill = true;
798 } else {
799 ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
800 st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
801 }
802
803 /* Hint: don't rewrite this node */
804 st.hint = true;
805
806 mir_insert_instruction_before(mir_next_op(ins), st);
807
808 if (!is_special)
809 ctx->spills++;
810 }
811 }
812
813 /* For special reads, figure out how many components we need */
814 unsigned read_mask = 0;
815
816 mir_foreach_instr_global_safe(ctx, ins) {
817 read_mask |= mir_mask_of_read_components(ins, spill_node);
818 }
819
820 /* Insert a load from TLS before the first consecutive
821 * use of the node, rewriting to use spilled indices to
822 * break up the live range. Or, for special, insert a
823 * move. Ironically the latter *increases* register
824 * pressure, but the two uses of the spilling mechanism
825 * are somewhat orthogonal. (special spilling is to use
826 * work registers to back special registers; TLS
827 * spilling is to use memory to back work registers) */
828
829 mir_foreach_block(ctx, block) {
830 bool consecutive_skip = false;
831 unsigned consecutive_index = 0;
832
833 mir_foreach_instr_in_block(block, ins) {
834 /* We can't rewrite the moves used to spill in the
835 * first place. These moves are hinted. */
836 if (ins->hint) continue;
837
838 if (!mir_has_arg(ins, spill_node)) {
839 consecutive_skip = false;
840 continue;
841 }
842
843 if (consecutive_skip) {
844 /* Rewrite */
845 mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
846 continue;
847 }
848
849 if (!is_special_w) {
850 consecutive_index = ++spill_index;
851
852 midgard_instruction *before = ins;
853
854 /* For a csel, go back one more so as not to break up the bundle */
855 if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
856 before = mir_prev_op(before);
857
858 midgard_instruction st;
859
860 if (is_special) {
861 /* Move */
862 st = v_mov(spill_node, blank_alu_src, consecutive_index);
863 st.no_spill = true;
864 } else {
865 /* TLS load */
866 st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
867 }
868
869 /* Mask the load based on the component count
870 * actually needed, to prevent RA loops */
871
872 st.mask = read_mask;
873
874 mir_insert_instruction_before(before, st);
875 // consecutive_skip = true;
876 } else {
877 /* Special writes already have their move spilled in */
878 consecutive_index = spill_slot;
879 }
880
882 /* Rewrite this use to point at the new index */
883 mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
884
885 if (!is_special)
886 ctx->fills++;
887 }
888 }
889
890 /* Reset hints */
891
892 mir_foreach_instr_global(ctx, ins) {
893 ins->hint = false;
894 }
895 }
896
897 void
898 schedule_program(compiler_context *ctx)
899 {
900 struct ra_graph *g = NULL;
901 bool spilled = false;
902 int iter_count = 1000; /* max iterations */
903
904 /* Number of 128-bit slots in memory we've spilled into */
905 unsigned spill_count = 0;
906
907 midgard_promote_uniforms(ctx, 16);
908
909 mir_foreach_block(ctx, block) {
910 midgard_pair_load_store(ctx, block);
911 }
912
913 /* Must be lowered right before RA */
914 mir_squeeze_index(ctx);
915 mir_lower_special_reads(ctx);
916
917 /* Lowering can introduce some dead moves */
918
919 mir_foreach_block(ctx, block) {
920 midgard_opt_dead_move_eliminate(ctx, block);
921 }
922
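/* Retry register allocation, spilling one node per iteration, until it
 * succeeds or the iteration limit is hit */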
923 do {
924 if (spilled)
925 mir_spill_register(ctx, g, &spill_count);
926
927 mir_squeeze_index(ctx);
928
929 g = NULL;
930 g = allocate_registers(ctx, &spilled);
931 } while(spilled && ((iter_count--) > 0));
932
933 /* We can simplify a bit after RA */
934
935 mir_foreach_block(ctx, block) {
936 midgard_opt_post_move_eliminate(ctx, block, g);
937 }
938
939 /* After RA finishes, we schedule all at once */
940
941 mir_foreach_block(ctx, block) {
942 schedule_block(ctx, block);
943 }
944
945 /* Finally, we create pipeline registers as a peephole pass after
946 * scheduling. This isn't totally optimal, since there are cases where
947 * the usage of pipeline registers can eliminate spills, but it does
948 * save some power */
949
950 mir_create_pipeline_registers(ctx);
951
952 if (iter_count <= 0) {
953 fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
954 assert(0);
955 }
956
957 /* Report spilling information. spill_count is in 128-bit slots (vec4 x
958 * fp32), but tls_size is in bytes, so multiply by 16 */
959
960 ctx->tls_size = spill_count * 16;
961
962 install_registers(ctx, g);
963 }