pan/midgard: Remove check for `class`
src/panfrost/midgard/midgard_ra.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"

/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide into 32-bit components in
 * 10 ways:
 *
 * vec4: xyzw
 * vec3: xyz, yzw
 * vec2: xy, yz, zw
 * vec1: x, y, z, w
 *
 * For each 64-bit register, we could subdivide similarly for 16-bit
 * components (TODO: half-float RA, not that we support fp16 yet)
 */

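/* 1 (vec4) + 2 (vec3) + 3 (vec2) + 4 (vec1) = 10 subdivisions per 128-bit register */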
#define WORK_STRIDE 10

/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF, /* xyzw */
        0x7, 0x7 << 1, /* xyz */
        0x3, 0x3 << 1, 0x3 << 2, /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3 /* x */
};

static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* xyzw */

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* xyz */
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W), /* yzw */

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* xy */
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W), /* yz */
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W), /* zw */

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* x */
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* y */
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* z */
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), /* w */
};

struct phys_reg {
        unsigned reg;
        unsigned mask;
        unsigned swizzle;
};

/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */
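        /* Illustrative example: a virtual vec2 placed in zw has reg.mask = 0xC,
         * so an instruction mask of 0x3 (xy) composes to
         * ((0xC >> 2) & 0x3) << 2 = 0xC, i.e. the hardware writes zw */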

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}

static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);

        /* Based on the register mask, we need to shift the swizzle over. E.g.
         * if we're writing to yz, a base swizzle of xy__ becomes _xy_. Save
         * the original first component (x). But to prevent duplicate shifting
         * (this only applies to ALU -- on load/store the mask param is set to
         * xyzw to prevent changes), we have to account for the shift inherent
         * to the original writemask */
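        /* Illustrative example: with an original writemask of xy (mask = 0x3)
         * landing in yz (dst.mask = 0x6), shift is 1, so the composed swizzle
         * moves over by one component and the vacated slot below is padded
         * with the first component */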

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
{
        /* Check for special cases */
        if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if ((reg < 0) || !g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */

        if (phys < 16)
                ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}

/* This routine creates a register set. It should be called sparingly, since
 * it's slow and its result can be cached. For legibility, variables are named
 * in terms of work registers, although it is also used to create the register
 * set for special register allocation */

static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
        int virtual_count = 32 * WORK_STRIDE;

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
                int work_vec4 = ra_alloc_reg_class(regs);
                int work_vec3 = ra_alloc_reg_class(regs);
                int work_vec2 = ra_alloc_reg_class(regs);
                int work_vec1 = ra_alloc_reg_class(regs);

                classes[4*c + 0] = work_vec1;
                classes[4*c + 1] = work_vec2;
                classes[4*c + 2] = work_vec3;
                classes[4*c + 3] = work_vec4;

                /* Special register classes have two registers in them */
                unsigned count = (c == REG_CLASS_WORK) ? work_count : 2;

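                /* Load/store is pinned to the r26-r27 pair and texturing to
                 * r28-r29; work registers start at r0 */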
                unsigned first_reg =
                        (c == REG_CLASS_LDST) ? 26 :
                        (c == REG_CLASS_TEX) ? 28 : 0;

                /* Add the full set of work registers */
                for (unsigned i = first_reg; i < (first_reg + count); ++i) {
                        int base = WORK_STRIDE * i;

                        /* Build a full set of subdivisions */
                        ra_class_add_reg(regs, work_vec4, base);
                        ra_class_add_reg(regs, work_vec3, base + 1);
                        ra_class_add_reg(regs, work_vec3, base + 2);
                        ra_class_add_reg(regs, work_vec2, base + 3);
                        ra_class_add_reg(regs, work_vec2, base + 4);
                        ra_class_add_reg(regs, work_vec2, base + 5);
                        ra_class_add_reg(regs, work_vec1, base + 6);
                        ra_class_add_reg(regs, work_vec1, base + 7);
                        ra_class_add_reg(regs, work_vec1, base + 8);
                        ra_class_add_reg(regs, work_vec1, base + 9);

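                        /* Any two subdivisions of the same 128-bit register
                         * whose masks overlap must conflict */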
                        for (unsigned a = 0; a < 10; ++a) {
                                unsigned mask1 = reg_type_to_mask[a];

                                for (unsigned b = 0; b < 10; ++b) {
                                        unsigned mask2 = reg_type_to_mask[b];

                                        if (mask1 & mask2)
                                                ra_add_reg_conflict(regs,
                                                                    base + a, base + b);
                                }
                        }
                }
        }

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        return regs;
}

/* This routine gets a precomputed register set from the screen if one is
 * available; otherwise, it computes one on the fly */

static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
{
        /* Bounds check */
        assert(work_count >= 8);
        assert(work_count <= 16);

        /* Compute index */
        unsigned index = work_count - 8;

        /* Find the reg set */
        struct ra_regs *cached = screen->regs[index];

        if (cached) {
                assert(screen->reg_classes[index]);
                *classes = screen->reg_classes[index];
                return cached;
        }

        /* Otherwise, create one */
        struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);

        /* Cache it and use it */
        screen->regs[index] = created;

        *classes = screen->reg_classes[index];
        return created;
}

/* Assign a (special) class, ensuring that it is compatible with whatever class
 * was already set */

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        /* Check that we're even a node */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return;

        /* First 4 are work, next 4 are load/store.. */
        unsigned current_class = classes[node] >> 2;

        /* Nothing to do */
        if (class == current_class)
                return;

        /* If we're changing, we must not have already assigned a special class */

        assert(current_class == REG_CLASS_WORK);
        assert(REG_CLASS_WORK == 0);

        classes[node] |= (class << 2);
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return true;

        unsigned current_class = classes[node] >> 2;

        switch (current_class) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        default:
                return (tag != TAG_LOAD_STORE_4);
        }
}

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
                BITSET_SET(bitfield, node);
}

static midgard_instruction *
mir_find_last_write(compiler_context *ctx, unsigned i)
{
        midgard_instruction *last_write = NULL;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                if (ins->ssa_args.dest == i)
                        last_write = ins;
        }

        return last_write;
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have */

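        /* alur/ldst/texr track reads by ALU, load/store, and texture ops
         * respectively; texw tracks writes from the texture pipeline */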
        unsigned *alur = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(alur, ins->ssa_args.src0);
                        mark_node_class(alur, ins->ssa_args.src1);
                        break;
                case TAG_LOAD_STORE_4:
                        mark_node_class(ldst, ins->ssa_args.src0);
                        mark_node_class(ldst, ins->ssa_args.src1);
                        break;
                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->ssa_args.src0);
                        mark_node_class(texr, ins->ssa_args.src1);
                        mark_node_class(texw, ins->ssa_args.dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst)) ||
                        (is_texw && (is_ldst));

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

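                /* Note that only the load/store case is actually split off
                 * below; ALU reads keep the original index as the work copy */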
                if (is_ldst) {
                        unsigned idx = spill_idx++;
                        midgard_instruction m = v_mov(i, blank_alu_src, idx);
                        midgard_instruction *use = mir_next_op(mir_find_last_write(ctx, i));
                        assert(use);
                        mir_insert_instruction_before(use, m);

                        /* Rewrite to use */
                        mir_rewrite_index_src_tag(ctx, i, idx, TAG_LOAD_STORE_4);
                }
        }

        free(alur);
        free(ldst);
        free(texr);
        free(texw);
}

/* This routine performs the actual register allocation. It should be followed
 * by install_registers */

struct ra_graph *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
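        /* (Illustrative: a uniform_cutoff of 12 leaves 16 - 4 = 12 work registers) */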
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
        unsigned *classes = NULL;
        struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);

        assert(regs != NULL);
        assert(classes != NULL);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);

        /* Register class (as known to the Mesa register allocator) is actually
         * the product of both semantic class (work, load/store, texture..) and
         * size (vec2/vec3..). First, we'll go through and determine the
         * minimum size needed to hold values */

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->ssa_args.dest < 0) continue;
                if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->ssa_args.dest;
                found_class[dest] = MAX2(found_class[dest], class);
        }

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes (TODO) */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(found_class, ins->ssa_args.src0, REG_CLASS_LDST);
                        set_class(found_class, ins->ssa_args.src1, REG_CLASS_LDST);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Non-load-store cannot read load/store */
                assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
                assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
        }

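        /* found_class[i] now encodes (semantic class << 2) | size, which
         * matches the classes[] layout built in create_register_set */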
        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                ra_set_node_class(g, i, classes[class]);
        }

        /* Determine liveness */

        int *live_start = malloc(nodes * sizeof(int));
        int *live_end = malloc(nodes * sizeof(int));

        /* Initialize as non-existent */

        for (int i = 0; i < nodes; ++i) {
                live_start[i] = live_end[i] = -1;
        }

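        /* d is a linear timestamp, counting instructions in emission order
         * across blocks */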
        int d = 0;

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
                                /* If this destination is not yet live, it is
                                 * now since we just wrote it */

                                int dest = ins->ssa_args.dest;

                                if (dest >= 0 && live_start[dest] == -1)
                                        live_start[dest] = d;
                        }

                        /* Since we just used a source, the source might be
                         * dead now. Scan the rest of the block for
                         * invocations, and if there are none, the source
                         * dies */

                        int sources[2] = {
                                ins->ssa_args.src0, ins->ssa_args.src1
                        };

                        for (int src = 0; src < 2; ++src) {
                                int s = sources[src];

                                if (ins->ssa_args.inline_constant && src == 1)
                                        continue;

                                if (s < 0) continue;

                                if (s >= SSA_FIXED_MINIMUM) continue;

                                if (!mir_is_live_after(ctx, block, ins, s)) {
                                        live_end[s] = d;
                                }
                        }

                        ++d;
                }
        }

        /* If a node still hasn't been killed, kill it now */

        for (int i = 0; i < nodes; ++i) {
                /* A live_end of -1 most likely indicates a pinned output, so
                 * keep the node live until the end of the program */

                if (live_end[i] == -1)
                        live_end[i] = d;
        }

        /* Setup interference between nodes that are live at the same time */

        for (int i = 0; i < nodes; ++i) {
                for (int j = i + 1; j < nodes; ++j) {
                        bool j_overlaps_i = live_start[j] < live_end[i];
                        bool i_overlaps_j = live_end[j] < live_start[i];

                        if (i_overlaps_j || j_overlaps_i)
                                ra_add_node_interference(g, i, j);
                }
        }

        /* Cleanup */
        free(live_start);
        free(live_end);

        if (!ra_allocate(g)) {
                *spilled = true;
        } else {
                *spilled = false;
        }

        /* Whether we were successful or not, report the graph so we can
         * compute spill nodes */

        return g;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        ssa_args args = ins->ssa_args;

        switch (ins->type) {
        case TAG_ALU_4: {
                int adjusted_src = args.inline_constant ? -1 : args.src1;
                struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
                struct phys_reg src2 = index_to_reg(ctx, g, adjusted_src);
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);

                unsigned uncomposed_mask = ins->mask;
                ins->mask = compose_writemask(uncomposed_mask, dest);

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu, ins->mask);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = args.inline_constant;

                if (args.inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                mod2.swizzle, uncomposed_mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                bool fixed = args.src0 >= SSA_FIXED_MINIMUM;

                if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
                        ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
                } else if (OP_IS_STORE_VARY(ins->load_store.op)) {
                        struct phys_reg src = index_to_reg(ctx, g, args.src0);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->load_store.reg = src.reg - 26;

                        /* TODO: swizzle/mask */
                } else {
                        /* Which physical register we read off depends on
                         * whether we are loading or storing -- think about the
                         * logical dataflow */

                        unsigned r = OP_IS_STORE(ins->load_store.op) ?
                                     args.src0 : args.dest;
                        struct phys_reg src = index_to_reg(ctx, g, r);

                        ins->load_store.reg = src.reg;

                        ins->load_store.swizzle = compose_swizzle(
                                ins->load_store.swizzle, 0xF,
                                default_phys_reg(0), src);

                        ins->mask = compose_writemask(
                                ins->mask, src);
                }

                break;
        }

        default:
                break;
        }
}

void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        install_registers_instr(ctx, g, ins);
                }
        }
}