/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements a graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * We additionally create classes for half-regs, which do not conflict
 * with the full-reg classes.  We do need at least sizes 1-4 (to deal
 * w/ texture sample instructions that output to half-reg).  At the
 * moment we don't create the higher order half-reg classes, as half-
 * reg frequently does not have enough precision for texture coords
 * at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as a class size 3 variable, which
 * is written (from a use/def perspective) at the 'add' instruction,
 * and we ignore the subsequent partial writes to r0.xy.  So the
 * 'add r0.z, ...' is the defining instruction, as it is the first to
 * partially write r0.xyz.
 *
 * Note i965 has a similar scenario, which they solve with a virtual
 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
 * register assignment.  But for us that is horrible from a scheduling
 * standpoint.  Instead what we do is use the idea of a 'definer'
 * instruction.  Ie. the first instruction (lowest ip) to write to the
 * variable is the one we consider from the use/def perspective when
 * building the interference graph.  (Other instructions which write
 * other variable components just define the variable some more.)
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO it might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result.
 */

static const unsigned class_sizes[] = {
	1, 2, 3, 4,
	4 + 4, /* txd + 1d/2d */
	4 + 6, /* txd + 3d */
};
#define class_count ARRAY_SIZE(class_sizes)

static const unsigned half_class_sizes[] = {
	1, 2, 3, 4,
};
#define half_class_count ARRAY_SIZE(half_class_sizes)

/* high registers seem to just be used for compute shaders?  Seems
 * like vec1 and vec3 are sufficient (for now?)
 */
static const unsigned high_class_sizes[] = {
	1, 3,
};
#define high_class_count ARRAY_SIZE(high_class_sizes)

#define total_class_count (class_count + half_class_count + high_class_count)
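/* With the tables above this works out to 6 full classes, 4 half classes
 * and 2 high classes (total_class_count == 12); these counts size the
 * various per-class tables below.
 */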

/* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
#define NUM_REGS		(4 * 48)  /* r0 to r47 */
#define NUM_HIGH_REGS	(4 * 8)   /* r48 to r55 */
#define FIRST_HIGH_REG	(4 * 48)
/* Number of virtual regs in a given class: */
#define CLASS_REGS(i)		(NUM_REGS - (class_sizes[i] - 1))
#define HALF_CLASS_REGS(i)	(NUM_REGS - (half_class_sizes[i] - 1))
#define HIGH_CLASS_REGS(i)	(NUM_HIGH_REGS - (high_class_sizes[i] - 1))
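/* Eg. with NUM_REGS == 192, the scalar class has CLASS_REGS(0) == 192
 * virtual regs, while the size 10 'txd + 3d' class has 192 - 9 == 183:
 * a reg of size N can start anywhere except the last N-1 scalar slots.
 */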

#define HALF_OFFSET	(class_count)
#define HIGH_OFFSET	(class_count + half_class_count)

/* register-set, created one time, used for all shaders: */
struct ir3_ra_reg_set {
	struct ra_regs *regs;
	unsigned int classes[class_count];
	unsigned int half_classes[half_class_count];
	unsigned int high_classes[high_class_count];
	/* maps flat virtual register space to base gpr: */
	uint16_t *ra_reg_to_gpr;
	/* maps cls,gpr to flat virtual register space: */
	uint16_t **gpr_to_ra_reg;
};

static void
build_q_values(unsigned int **q_values, unsigned off,
		const unsigned *sizes, unsigned count)
{
	for (unsigned i = 0; i < count; i++) {
		q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);

		/* From register_allocate.c:
		 *
		 * q(B,C) (indexed by C, B is this register class) in
		 * Runeson/Nyström paper.  This is "how many registers of B could
		 * the worst choice register from C conflict with".
		 *
		 * If we just let the register allocation algorithm compute these
		 * values, it is extremely expensive.  However, since all of our
		 * registers are laid out, we can very easily compute them
		 * ourselves.  View the register from C as fixed starting at GRF n
		 * somewhere in the middle, and the register from B as sliding back
		 * and forth.  Then the first register to conflict from B is the
		 * one starting at n - class_size[B] + 1 and the last register to
		 * conflict will start at n + class_size[C] - 1.  Therefore, the
		 * number of conflicts from B is class_size[B] + class_size[C] - 1.
		 *
		 *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
		 * B | | | | | |n| --> | | | | | | |
		 *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
		 *             +-+-+-+-+-+
		 * C           |n| | | | |
		 *             +-+-+-+-+-+
		 *
		 * (Idea copied from brw_fs_reg_allocate.cpp)
		 */
		for (unsigned j = 0; j < count; j++)
			q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
	}
}
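
/* Worked example for the formula above:  with B the scalar class and C
 * the vec4 class, the worst-choice vec4 reg overlaps 1 + 4 - 1 = 4
 * scalar regs, so q(scalar, vec4) == 4 (and the formula is symmetric).
 */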

/* One-time setup of RA register-set, which describes all the possible
 * "virtual" registers and their interferences.  Ie. double register
 * occupies (and conflicts with) two single registers, and so forth.
 * Since registers do not need to be aligned to their class size, they
 * can conflict with other registers in the same class too.  Ie:
 *
 *    Single (base) |  Double
 *    --------------+---------------
 *       R0         |  D0
 *       R1         |  D0 D1
 *       R2         |     D1 D2
 *       R3         |        D2
 *           .. and so on..
 *
 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
 * really just four scalar registers.  Don't let that confuse you.)
 */
struct ir3_ra_reg_set *
ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
{
	struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
	unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
	unsigned int **q_values;

	/* calculate # of regs across all classes: */
	ra_reg_count = 0;
	for (unsigned i = 0; i < class_count; i++)
		ra_reg_count += CLASS_REGS(i);
	for (unsigned i = 0; i < half_class_count; i++)
		ra_reg_count += HALF_CLASS_REGS(i);
	for (unsigned i = 0; i < high_class_count; i++)
		ra_reg_count += HIGH_CLASS_REGS(i);

	/* allocate and populate q_values: */
	q_values = ralloc_array(set, unsigned *, total_class_count);

	build_q_values(q_values, 0, class_sizes, class_count);
	build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
	build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);

	/* allocate the reg-set.. */
	set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
	set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
	set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);

	/* .. and classes */
	reg = 0;
	for (unsigned i = 0; i < class_count; i++) {
		set->classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));

		for (unsigned j = 0; j < CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[i][j] = reg;

			for (unsigned br = j; br < j + class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br, reg);

			reg++;
		}
	}

	first_half_reg = reg;
	base = HALF_OFFSET;

	for (unsigned i = 0; i < half_class_count; i++) {
		set->half_classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[base + i] =
				ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));

		for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->half_classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[base + i][j] = reg;

			for (unsigned br = j; br < j + half_class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);

			reg++;
		}
	}

	first_high_reg = reg;
	base = HIGH_OFFSET;

	for (unsigned i = 0; i < high_class_count; i++) {
		set->high_classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[base + i] =
				ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));

		for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->high_classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[base + i][j] = reg;

			for (unsigned br = j; br < j + high_class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);

			reg++;
		}
	}

	/* starting a6xx, half precision regs conflict w/ full precision regs
	 * (the register file is merged, so a full reg aliases two half regs):
	 */
	if (compiler->gpu_id >= 600) {
		/* because of transitivity, we can get away with just setting up
		 * conflicts between the first class of full and half regs:
		 */
		for (unsigned j = 0; j < CLASS_REGS(0) / 2; j++) {
			unsigned freg  = set->gpr_to_ra_reg[0][j];
			unsigned hreg0 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 0];
			unsigned hreg1 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 1];

			ra_add_transitive_reg_conflict(set->regs, freg, hreg0);
			ra_add_transitive_reg_conflict(set->regs, freg, hreg1);
		}

		// TODO also need to update q_values, but for now:
		ra_set_finalize(set->regs, NULL);
	} else {
		ra_set_finalize(set->regs, q_values);
	}

	ralloc_free(q_values);

	return set;
}
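
/* Eg. in the table further above, the double reg D1 has base gpr 1
 * (ra_reg_to_gpr) and conflicts with scalars R1 and R2; going the other
 * way, gpr_to_ra_reg[1][1] recovers D1's flat virtual reg index.
 */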

/* additional block-data (per-block) */
struct ir3_ra_block_data {
	BITSET_WORD *def;        /* variables defined before used in block */
	BITSET_WORD *use;        /* variables used before defined in block */
	BITSET_WORD *livein;     /* which defs reach entry point of block */
	BITSET_WORD *liveout;    /* which defs reach exit point of block */
};

/* additional instruction-data (per-instruction) */
struct ir3_ra_instr_data {
	/* cached instruction 'definer' info: */
	struct ir3_instruction *defn;
	int off, sz, cls;
};

/* register-assign context, per-shader */
struct ir3_ra_ctx {
	struct ir3 *ir;
	gl_shader_stage type;
	bool frag_face;

	struct ir3_ra_reg_set *set;
	struct ra_graph *g;
	unsigned alloc_count;
	/* one per class, plus one slot for arrays: */
	unsigned class_alloc_count[total_class_count + 1];
	unsigned class_base[total_class_count + 1];
	unsigned instr_cnt;
	unsigned *def, *use;     /* def/use table */
	struct ir3_ra_instr_data *instrd;
};

/* does the range [a_start, a_end) conflict with [b_start, b_end)? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}

static bool
is_half(struct ir3_instruction *instr)
{
	return !!(instr->regs[0]->flags & IR3_REG_HALF);
}

static bool
is_high(struct ir3_instruction *instr)
{
	return !!(instr->regs[0]->flags & IR3_REG_HIGH);
}

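/* map a value size (in components) to the smallest class that fits it,
 * eg. size_to_class(3, false, false) returns 2 since class_sizes[2] == 3,
 * and a size 5 full-precision value rounds up to the size 8 'txd' class:
 */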
static int
size_to_class(unsigned sz, bool half, bool high)
{
	if (high) {
		for (unsigned i = 0; i < high_class_count; i++)
			if (high_class_sizes[i] >= sz)
				return i + HIGH_OFFSET;
	} else if (half) {
		for (unsigned i = 0; i < half_class_count; i++)
			if (half_class_sizes[i] >= sz)
				return i + HALF_OFFSET;
	} else {
		for (unsigned i = 0; i < class_count; i++)
			if (class_sizes[i] >= sz)
				return i;
	}
	debug_assert(0);
	return -1;
}

static bool
writes_gpr(struct ir3_instruction *instr)
{
	if (is_store(instr))
		return false;
	/* is dest a normal temp register: */
	struct ir3_register *reg = instr->regs[0];
	if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
		return false;
	if ((reg->num == regid(REG_A0, 0)) ||
			(reg->num == regid(REG_P0, 0)))
		return false;
	return true;
}

static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}

static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_FI) {
		/* What about the case where the collect is a subset of the
		 * array?  We'd need to find the distance between where the
		 * actual array starts and the fanin.. that probably doesn't
		 * happen currently.
		 */
		struct ir3_register *src;
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears the SSA flag)
		 */
		foreach_src_n(src, n, instr) {
			struct ir3_instruction *dd;
			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}

	} else if (instr->cp.right || instr->cp.left) {
		/* this also covers the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes (even if some of them may
		 * be fanouts from a texture sample (for example) instr).  We
		 * just need to walk the list finding the first element of
		 * the group defined (lowest ip):
		 */
		int cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;

	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the fanout nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    add r1.x, ...
		 *    add r1.y, ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_FO) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		debug_assert(instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		debug_assert(instr->opc == OPC_META_FO);
		*off = MAX2(*off, instr->fo.off);

		d = dd;
	}

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}

static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
		}
	}
}

/* give each instruction a name (and ip), and count up the # of names
 * of each class
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

#ifdef DEBUG
		instr->name = ~0;
#endif

		ctx->instr_cnt++;

		if (instr->regs_count == 0)
			continue;

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			instr->name = ctx->class_alloc_count[id->cls]++;
			ctx->alloc_count++;
		}
	}
}

static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
		ra_block_find_definers(ctx, block);
	}

	list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}
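
	/* eg. if the first class has 10 names and the second has 4, then
	 * class_base[] starts {0, 10, 14, ...}, so names from different
	 * classes never collide in the flat name space.
	 */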

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += arr->length;
		base += arr->length;
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
}

static unsigned
__ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
{
	unsigned name;
	debug_assert(cls >= 0);
	debug_assert(cls < total_class_count);  /* we shouldn't get arrays here.. */
	name = ctx->class_base[cls] + defn->name;
	debug_assert(name < ctx->alloc_count);
	return name;
}

static int
ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
{
	/* TODO handle name mapping for arrays */
	return __ra_name(ctx, id->cls, id->defn);
}

static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}

static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) \
		do { \
			/* defined on first write: */ \
			if (!ctx->def[name]) \
				ctx->def[name] = instr->ip; \
			ctx->use[name] = instr->ip; \
			BITSET_SET(bd->def, name); \
		} while(0);

#define use(name, instr) \
		do { \
			ctx->use[name] = MAX2(ctx->use[name], instr->ip); \
			if (!BITSET_TEST(bd->def, name)) \
				BITSET_SET(bd->use, name); \
		} while(0);

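	/* a name's live range is [first def ip, last use ip]; per block the
	 * def/use bitsets record names defined before any use, and names
	 * used before any def, which feed the livein/liveout dataflow
	 * below.
	 */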
	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		struct ir3_instruction *src;
		struct ir3_register *reg;

		if (instr->regs_count == 0)
			continue;

		/* There are a couple special cases to deal with here:
		 *
		 * fanout: used to split values from a higher class to a lower
		 *     class, for example split the results of a texture fetch
		 *     into individual scalar values;  We skip over these from
		 *     a 'def' perspective, and for a 'use' we walk the chain
		 *     up to the defining instruction.
		 *
		 * fanin: used to collect values from lower class and assemble
		 *     them together into a higher class, for example arguments
		 *     to texture sample instructions;  We consider these to be
		 *     defined at the earliest fanin source.
		 *
		 * Most of this is handled in the get_definer() helper.
		 *
		 * In either case, we trace the instruction back to the original
		 * definer and consider that as the def/use ip.
		 */

		if (writes_gpr(instr)) {
			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
			struct ir3_register *dst = instr->regs[0];

			if (dst->flags & IR3_REG_ARRAY) {
				struct ir3_array *arr =
					ir3_lookup_array(ctx->ir, dst->array.id);
				unsigned i;

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* set the node class now.. in case we don't encounter
				 * this array dst again.  From the register_allocate
				 * algo's perspective, these are all single/scalar
				 * regs:
				 */
				for (i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}

				/* indirect write is treated like a write to all array
				 * elements, since we don't know which one is actually
				 * written:
				 */
				if (dst->flags & IR3_REG_RELATIV) {
					for (i = 0; i < arr->length; i++) {
						unsigned name = arr->base + i;
						def(name, instr);
					}
				} else {
					unsigned name = arr->base + dst->array.offset;
					def(name, instr);
				}

			} else if (id->defn == instr) {
				unsigned name = ra_name(ctx, id);

				/* since we are in SSA at this point: */
				debug_assert(!BITSET_TEST(bd->use, name));

				def(name, id->defn);

				if (is_high(id->defn)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(id->defn)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}
		}

		foreach_src(reg, instr) {
			if (reg->flags & IR3_REG_ARRAY) {
				struct ir3_array *arr =
					ir3_lookup_array(ctx->ir, reg->array.id);
				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* indirect read is treated like a read from all array
				 * elements, since we don't know which one is actually
				 * read:
				 */
				if (reg->flags & IR3_REG_RELATIV) {
					unsigned i;
					for (i = 0; i < arr->length; i++) {
						unsigned name = arr->base + i;
						use(name, instr);
					}
				} else {
					unsigned name = arr->base + reg->array.offset;
					use(name, instr);
					/* NOTE: arrays are not SSA so unconditionally
					 * set the use bit:
					 */
					BITSET_SET(bd->use, name);
					debug_assert(reg->array.offset < arr->length);
				}
			} else if ((src = ssa(reg)) && writes_gpr(src)) {
				unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
				use(name, instr);
			}
		}
	}
}

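/* Standard iterative dataflow:  livein = use | (liveout & ~def), and
 * liveout is the union of the livein of each successor.  Returns true
 * if anything changed, so the caller iterates to a fixed point.
 */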
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}

static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	bool first = true;
	debug_printf(" %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			if (!first)
				debug_printf(",");
			debug_printf(" %04u", i);
			first = false;
		}
	}
	debug_printf("\n");
}

static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		debug_printf("AFTER LIVEIN/OUT:\n");
		ir3_print(ir);
		list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
			struct ir3_ra_block_data *bd = block->data;
			debug_printf("block%u:\n", block_id(block));
			print_bitset(" def", bd->def, ctx->alloc_count);
			print_bitset(" use", bd->use, ctx->alloc_count);
			print_bitset(" l/i", bd->livein, ctx->alloc_count);
			print_bitset(" l/o", bd->liveout, ctx->alloc_count);
		}
		list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
			debug_printf("array%u:\n", arr->id);
			debug_printf(" length:   %u\n", arr->length);
			debug_printf(" start_ip: %u\n", arr->start_ip);
			debug_printf(" end_ip:   %u\n", arr->end_ip);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	/* need to fix things up to keep outputs live: */
	for (unsigned i = 0; i < ir->noutputs; i++) {
		struct ir3_instruction *instr = ir->outputs[i];
		unsigned name = ra_name(ctx, &ctx->instrd[instr->ip]);
		ctx->use[name] = ctx->instr_cnt;
	}

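	/* two names interfere when their [def, use) ip ranges overlap, so
	 * with everything reduced to linear ip ranges this is a simple
	 * pairwise check:
	 */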
	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}

/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
	switch (opc_cat(instr->opc)) {
	case 1: /* move instructions */
		instr->cat1.dst_type = half_type(instr->cat1.dst_type);
		break;
	case 3:
		switch (instr->opc) {
		case OPC_MAD_F32:
			instr->opc = OPC_MAD_F16;
			break;
		case OPC_SEL_B32:
			instr->opc = OPC_SEL_B16;
			break;
		case OPC_SEL_S32:
			instr->opc = OPC_SEL_S16;
			break;
		case OPC_SEL_F32:
			instr->opc = OPC_SEL_F16;
			break;
		case OPC_SAD_S32:
			instr->opc = OPC_SAD_S16;
			break;
		/* instructions may already be fixed up: */
		case OPC_MAD_F16:
		case OPC_SEL_B16:
		case OPC_SEL_S16:
		case OPC_SEL_F16:
		case OPC_SAD_S16:
			break;
		default:
			assert(0);
			break;
		}
		break;
	case 5:
		instr->cat5.type = half_type(instr->cat5.type);
		break;
	}
}

/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
	switch (instr->opc) {
	case OPC_MOV:
		instr->cat1.src_type = half_type(instr->cat1.src_type);
		break;
	default:
		break;
	}
}

/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from a scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned name = ra_name(ctx, id);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		debug_assert(!(reg->flags & IR3_REG_RELATIV));

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num;
		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}

static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		struct ir3_register *reg;

		if (instr->regs_count == 0)
			continue;

		if (writes_gpr(instr)) {
			reg_assign(ctx, instr->regs[0], instr);
			if (instr->regs[0]->flags & IR3_REG_HALF)
				fixup_half_instr_dst(instr);
		}

		foreach_src_n(reg, n, instr) {
			struct ir3_instruction *src = reg->instr;
			/* Note: reg->instr could be null for IR3_REG_ARRAY */
			if (!(src || (reg->flags & IR3_REG_ARRAY)))
				continue;
			reg_assign(ctx, instr->regs[n+1], src);
			if (instr->regs[n+1]->flags & IR3_REG_HALF)
				fixup_half_instr_src(instr);
		}
	}
}

static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	/* pre-assign array elements: */
	list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
		unsigned base = 0;

		if (arr->end_ip == 0)
			continue;

		/* figure out what else we conflict with which has already
		 * been assigned:
		 */
retry:
		list_for_each_entry (struct ir3_array, arr2, &ctx->ir->array_list, node) {
			if (arr2 == arr)
				break;
			if (arr2->end_ip == 0)
				continue;
			/* if it intersects with the liverange AND register range.. */
			if (intersects(arr->start_ip, arr->end_ip,
					arr2->start_ip, arr2->end_ip) &&
				intersects(base, base + arr->length,
					arr2->reg, arr2->reg + arr2->length)) {
				base = MAX2(base, arr2->reg + arr2->length);
				goto retry;
			}
		}

		arr->reg = base;

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned name, reg;

			name = arr->base + i;
			reg = ctx->set->gpr_to_ra_reg[0][base++];

			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	if (!ra_allocate(ctx->g))
		return -1;

	list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}

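/* Entry point:  build names + liveness, add interference, color the
 * graph, then write the assigned registers back into the ir.  Returns
 * 0 on success, -1 if allocation failed.  A hypothetical caller sketch
 * (not the actual driver code):
 *
 *    if (ir3_ra(ir, shader->type, false, false) != 0)
 *       return compile_error(...);   // eg. fail the shader compile
 */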
int ir3_ra(struct ir3 *ir, gl_shader_stage type,
		bool frag_coord, bool frag_face)
{
	struct ir3_ra_ctx ctx = {
			.ir = ir,
			.type = type,
			.frag_face = frag_face,
			.set = ir->compiler->set,
	};
	int ret;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ret = ra_alloc(&ctx);
	ra_destroy(&ctx);

	return ret;
}