pan/midgard: Lower texr/texw mixed registers
[mesa.git] / src / panfrost / midgard / midgard_ra.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"

/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide into 32-bit components 10 ways:
 *
 * vec4: xyzw
 * vec3: xyz, yzw
 * vec2: xy, yz, zw
 * vec1: x, y, z, w
 *
 * Each 64-bit half could be subdivided similarly into 16-bit components
 * (TODO: half-float RA, not that we support fp16 yet)
 */

#define WORK_STRIDE 10
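/* (1 vec4 + 2 vec3 + 3 vec2 + 4 vec1 placements per 128-bit register) */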

/* We have overlapping register classes for special registers, handled via
 * shadows */

#define SHADOW_R27 17
#define SHADOW_R28 18
#define SHADOW_R29 19
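
/* Indices 17-19 do not correspond to real registers used elsewhere here (work
 * registers stop at r15 and the special classes use r26-r29), so they can
 * stand in for r27-r29; index_to_reg() maps a shadow index back to the real
 * register number */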

/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF,                                    /* xyzw */
        0x7, 0x7 << 1,                          /* xyz */
        0x3, 0x3 << 1, 0x3 << 2,                /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3       /* x */
};

static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};
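
/* For example, virtual register type 4 is the yz vec2: its mask is
 * 0x3 << 1 = 0x6, and its swizzle is yzzw, so a virtual read of component x
 * actually reads physical component y */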

struct phys_reg {
        unsigned reg;
        unsigned mask;
        unsigned swizzle;
};

/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}
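
/* e.g. an instruction mask of 0x3 (xy) on a register slot with mask 0x6 (yz)
 * composes to 0x6: shift = 1, ((0x6 >> 1) & 0x3) << 1 = 0x6 */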

static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);

        /* Based on the register mask, we need to adjust over. E.g. if we're
         * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
         * original first component (x). But to prevent duplicate shifting
         * (only applies to ALU -- mask param is set to xyzw out on L/S to
         * prevent changes), we have to account for the shift inherent to the
         * original writemask */

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}
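
/* Worked example (ALU case): suppose an instruction writes xy (mask 0x3) but
 * RA placed the destination in the zw slot (dst.mask 0xC), and a source with
 * swizzle xyxx sits in a plain vec4 slot. Then shift = 2, so the swizzle is
 * moved up two lanes (xyxx -> __xy) and the gaps are filled with the first
 * component, giving xxxy: hardware lanes z and w read x and y as intended */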

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
{
        /* Check for special cases */
        if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if ((reg < 0) || !g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        /* Apply shadow registers */

        if (phys >= SHADOW_R27 && phys <= SHADOW_R29)
                phys += 27 - SHADOW_R27;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */

        if (phys < 16)
                ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}
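
/* For instance, an RA result of virt = 54 decodes to phys = 5, type = 4:
 * physical register r5, mask 0x6, swizzle yzzw (the yz half of r5) */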

/* This routine creates a register set. Should be called infrequently since
 * it's slow and the result can be cached. For legibility, variables are named
 * in terms of work registers, although it is also used to create the register
 * set for special register allocation */

static void
add_shadow_conflicts(struct ra_regs *regs, unsigned base, unsigned shadow)
{
        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
                unsigned reg_a = (WORK_STRIDE * base) + a;

                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
                        unsigned reg_b = (WORK_STRIDE * shadow) + b;

                        ra_add_reg_conflict(regs, reg_a, reg_b);
                        ra_add_reg_conflict(regs, reg_b, reg_a);
                }
        }
}

static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
        int virtual_count = 32 * WORK_STRIDE;

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
                int work_vec4 = ra_alloc_reg_class(regs);
                int work_vec3 = ra_alloc_reg_class(regs);
                int work_vec2 = ra_alloc_reg_class(regs);
                int work_vec1 = ra_alloc_reg_class(regs);

                classes[4*c + 0] = work_vec1;
                classes[4*c + 1] = work_vec2;
                classes[4*c + 2] = work_vec3;
                classes[4*c + 3] = work_vec4;

                /* Special register classes have other register counts */
                unsigned count =
                        (c == REG_CLASS_WORK)   ? work_count :
                        (c == REG_CLASS_LDST27) ? 1 : 2;

                /* We arbitrarily pick r17 (RA unused) as the shadow for r27 */
                unsigned first_reg =
                        (c == REG_CLASS_LDST)   ? 26 :
                        (c == REG_CLASS_LDST27) ? SHADOW_R27 :
                        (c == REG_CLASS_TEXR)   ? 28 :
                        (c == REG_CLASS_TEXW)   ? SHADOW_R28 :
                        0;

                /* Add the full set of work registers */
                for (unsigned i = first_reg; i < (first_reg + count); ++i) {
                        int base = WORK_STRIDE * i;

                        /* Build a full set of subdivisions */
                        ra_class_add_reg(regs, work_vec4, base);
                        ra_class_add_reg(regs, work_vec3, base + 1);
                        ra_class_add_reg(regs, work_vec3, base + 2);
                        ra_class_add_reg(regs, work_vec2, base + 3);
                        ra_class_add_reg(regs, work_vec2, base + 4);
                        ra_class_add_reg(regs, work_vec2, base + 5);
                        ra_class_add_reg(regs, work_vec1, base + 6);
                        ra_class_add_reg(regs, work_vec1, base + 7);
                        ra_class_add_reg(regs, work_vec1, base + 8);
                        ra_class_add_reg(regs, work_vec1, base + 9);

                        for (unsigned a = 0; a < WORK_STRIDE; ++a) {
                                unsigned mask1 = reg_type_to_mask[a];

                                for (unsigned b = 0; b < WORK_STRIDE; ++b) {
                                        unsigned mask2 = reg_type_to_mask[b];

                                        if (mask1 & mask2)
                                                ra_add_reg_conflict(regs,
                                                                    base + a, base + b);
                                }
                        }
                }
        }

        /* We have duplicate classes */
        add_shadow_conflicts(regs, 27, SHADOW_R27);
        add_shadow_conflicts(regs, 28, SHADOW_R28);
        add_shadow_conflicts(regs, 29, SHADOW_R29);

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        return regs;
}

/* This routine gets a precomputed register set off the screen if it's able, or
 * otherwise it computes one on the fly */

static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
{
        /* Bounds check */
        assert(work_count >= 8);
        assert(work_count <= 16);

        /* Compute index */
        unsigned index = work_count - 8;

        /* Find the reg set */
        struct ra_regs *cached = screen->regs[index];

        if (cached) {
                assert(screen->reg_classes[index]);
                *classes = screen->reg_classes[index];
                return cached;
        }

        /* Otherwise, create one */
        struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);

        /* Cache it and use it */
        screen->regs[index] = created;

        *classes = screen->reg_classes[index];
        return created;
}

/* Assign a (special) class, ensuring that it is compatible with whatever class
 * was already set */
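
/* The per-node class value packs (semantic class << 2) | (size class), where
 * the size class is 0-3 for vec1-vec4; the packed value doubles as the index
 * into the classes[] table built by create_register_set() */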

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        /* Check that we're even a node */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return;

        /* First 4 are work, next 4 are load/store.. */
        unsigned current_class = classes[node] >> 2;

        /* Nothing to do */
        if (class == current_class)
                return;

        if ((current_class == REG_CLASS_LDST27) && (class == REG_CLASS_LDST))
                return;

        /* If we're changing, we must not have already assigned a special
         * class */

        bool compat = current_class == REG_CLASS_WORK;
        compat |= (current_class == REG_CLASS_LDST) && (class == REG_CLASS_LDST27);

        assert(compat);

        classes[node] &= 0x3;
        classes[node] |= (class << 2);
}

static void
force_vec4(unsigned *classes, unsigned node)
{
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return;

        /* Force vec4 = 3 */
        classes[node] |= 0x3;
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_LDST:
        case REG_CLASS_LDST27:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return (tag == TAG_ALU_4);
        default:
                unreachable("Invalid class");
        }
}

static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_LDST27:
        case REG_CLASS_WORK:
                return (tag == TAG_ALU_4) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
                BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->ssa_args.dest);
                        mark_node_class(alur, ins->ssa_args.src0);

                        if (!ins->ssa_args.inline_constant)
                                mark_node_class(alur, ins->ssa_args.src1);

                        break;
                case TAG_LOAD_STORE_4:
                        mark_node_class(ldst, ins->ssa_args.src0);
                        mark_node_class(ldst, ins->ssa_args.src1);
                        break;
                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->ssa_args.src0);
                        mark_node_class(texr, ins->ssa_args.src1);
                        mark_node_class(texw, ins->ssa_args.dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */
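
        /* For example, if an index is read by both an ALU op and a load/store
         * op, the original index keeps the ALU (work) use, a move to a fresh
         * copy is emitted after each write of the index, and the load/store
         * read is rewritten to use the copy instead */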

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr));

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        midgard_instruction m = hazard_write ?
                                v_mov(idx, blank_alu_src, i) :
                                v_mov(i, blank_alu_src, idx);

                        /* Insert move after each write */
                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->compact_branch) continue;
                                if (pre_use->ssa_args.dest != i)
                                        continue;

                                /* If the hazard is from writing, we only
                                 * insert moves for the contentious class. If
                                 * the hazard is from reading, we insert moves
                                 * whenever the index is written */

                                if (hazard_write && pre_use->type != classes[j])
                                        continue;

                                midgard_instruction *use = mir_next_op(pre_use);
                                assert(use);
                                mir_insert_instruction_before(use, m);
                        }

                        /* Rewrite to use */
                        if (hazard_write)
                                mir_rewrite_index_dst_tag(ctx, i, idx, classes[j]);
                        else
                                mir_rewrite_index_src_tag(ctx, i, idx, classes[j]);
                }
        }

        free(alur);
        free(aluw);
        free(ldst);
        free(texr);
        free(texw);
}

/* This routine performs the actual register allocation. It should be followed
 * by install_registers */

struct ra_graph *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
        unsigned *classes = NULL;
        struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);

        assert(regs != NULL);
        assert(classes != NULL);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);

        /* Register class (as known to the Mesa register allocator) is actually
         * the product of both semantic class (work, load/store, texture..) and
         * size (vec2/vec3..). First, we'll go through and determine the
         * minimum size needed to hold values */

        unsigned *found_class = calloc(ctx->temp_count, sizeof(unsigned));

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->ssa_args.dest < 0) continue;
                if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->ssa_args.dest;
                found_class[dest] = MAX2(found_class[dest], class);
        }
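
        /* e.g. a write with mask 0x7 (xyz) gives util_logbase2(0x7) = 2, so
         * the node needs at least a vec3 slot */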

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes (TODO) */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        bool force_r27 = OP_IS_R27_ONLY(ins->load_store.op);
                        unsigned class = force_r27 ? REG_CLASS_LDST27 : REG_CLASS_LDST;

                        set_class(found_class, ins->ssa_args.src0, class);
                        set_class(found_class, ins->ssa_args.src1, class);

                        if (force_r27) {
                                force_vec4(found_class, ins->ssa_args.dest);
                                force_vec4(found_class, ins->ssa_args.src0);
                                force_vec4(found_class, ins->ssa_args.src1);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
                        set_class(found_class, ins->ssa_args.src0, REG_CLASS_TEXR);
                        set_class(found_class, ins->ssa_args.src1, REG_CLASS_TEXR);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
                assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));

                if (!ins->ssa_args.inline_constant)
                        assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                ra_set_node_class(g, i, classes[class]);
        }

        /* Determine liveness */

        int *live_start = malloc(nodes * sizeof(int));
        int *live_end = malloc(nodes * sizeof(int));

        /* Initialize as non-existent */

        for (int i = 0; i < nodes; ++i) {
                live_start[i] = live_end[i] = -1;
        }

        int d = 0;

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
                                /* If this destination is not yet live, it is
                                 * now since we just wrote it */

                                int dest = ins->ssa_args.dest;

                                if (dest >= 0 && live_start[dest] == -1)
                                        live_start[dest] = d;
                        }

                        /* Since we just used a source, the source might be
                         * dead now. Scan the rest of the block for
                         * invocations, and if there are none, the source
                         * dies */

                        int sources[2] = {
                                ins->ssa_args.src0, ins->ssa_args.src1
                        };

                        for (int src = 0; src < 2; ++src) {
                                int s = sources[src];

                                if (ins->ssa_args.inline_constant && src == 1)
                                        continue;

                                if (s < 0) continue;

                                if (s >= SSA_FIXED_MINIMUM) continue;

                                if (!mir_is_live_after(ctx, block, ins, s)) {
                                        live_end[s] = d;
                                }
                        }

                        ++d;
                }
        }

        /* If a node still hasn't been killed, kill it now */

        for (int i = 0; i < nodes; ++i) {
                /* live_start == -1 most likely indicates a pinned output */

                if (live_end[i] == -1)
                        live_end[i] = d;
        }
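
        /* At this point every node has a [live_start, live_end] interval,
         * measured in global instruction order; nodes whose intervals overlap
         * must not share a register */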

        /* Setup interference between nodes that are live at the same time */

        for (int i = 0; i < nodes; ++i) {
                for (int j = i + 1; j < nodes; ++j) {
                        bool j_overlaps_i = live_start[j] < live_end[i];
                        bool i_overlaps_j = live_end[j] < live_start[i];

                        if (i_overlaps_j || j_overlaps_i)
                                ra_add_node_interference(g, i, j);
                }
        }

        /* Cleanup */
        free(live_start);
        free(live_end);

        if (!ra_allocate(g)) {
                *spilled = true;
        } else {
                *spilled = false;
        }

        /* Whether we were successful or not, report the graph so we can
         * compute spill nodes */

        return g;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        ssa_args args = ins->ssa_args;

        switch (ins->type) {
        case TAG_ALU_4: {
                int adjusted_src = args.inline_constant ? -1 : args.src1;
                struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
                struct phys_reg src2 = index_to_reg(ctx, g, adjusted_src);
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);

                unsigned uncomposed_mask = ins->mask;
                ins->mask = compose_writemask(uncomposed_mask, dest);

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu, ins->mask);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = args.inline_constant;

                if (args.inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                mod2.swizzle, uncomposed_mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                bool fixed = args.src0 >= SSA_FIXED_MINIMUM;

                if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
                        ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
                } else if (OP_IS_STORE_VARY(ins->load_store.op)) {
                        struct phys_reg src = index_to_reg(ctx, g, args.src0);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;

                        /* TODO: swizzle/mask */
                } else {
                        /* Which physical register we read off depends on
                         * whether we are loading or storing -- think about the
                         * logical dataflow */

                        bool encodes_src =
                                OP_IS_STORE(ins->load_store.op) &&
                                ins->load_store.op != midgard_op_st_cubemap_coords;

                        unsigned r = encodes_src ?
                                args.src0 : args.dest;

                        struct phys_reg src = index_to_reg(ctx, g, r);

                        ins->load_store.reg = src.reg;

                        ins->load_store.swizzle = compose_swizzle(
                                ins->load_store.swizzle, 0xF,
                                default_phys_reg(0), src);

                        ins->mask = compose_writemask(
                                ins->mask, src);
                }

                break;
        }

        case TAG_TEXTURE_4: {
                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);
                struct phys_reg coord = index_to_reg(ctx, g, args.src0);
                struct phys_reg lod = index_to_reg(ctx, g, args.src1);

                assert(dest.reg == 28 || dest.reg == 29);
                assert(coord.reg == 28 || coord.reg == 29);

                /* First, install the texture coordinate */
                ins->texture.in_reg_full = 1;
                ins->texture.in_reg_upper = 0;
                ins->texture.in_reg_select = coord.reg - 28;
                ins->texture.in_reg_swizzle =
                        compose_swizzle(ins->texture.in_reg_swizzle, 0xF, coord, dest);

                /* Next, install the destination */
                ins->texture.out_full = 1;
                ins->texture.out_upper = 0;
                ins->texture.out_reg_select = dest.reg - 28;
                ins->texture.swizzle =
                        compose_swizzle(ins->texture.swizzle, dest.mask, dest, dest);
                ins->mask =
                        compose_writemask(ins->mask, dest);

                /* If there is a register LOD/bias, use it */
                if (args.src1 > -1) {
                        midgard_tex_register_select sel = {
                                .select = lod.reg,
                                .full = 1,
                                .component = lod.swizzle & 3,
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins->texture.bias = packed;
                }

                break;
        }

        default:
                break;
        }
}

void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        install_registers_instr(ctx, g, ins);
                }
        }
}