[mesa.git] src/gallium/drivers/freedreno/ir3/ir3_ra.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "freedreno_util.h"
33
34 #include "ir3.h"
35 #include "ir3_compiler.h"
36
37 /*
38 * Register Assignment:
39 *
40 * Uses the register_allocate util, which implements graph coloring
41 * algo with interference classes. To handle the cases where we need
42 * consecutive registers (for example, texture sample instructions),
43 * we model these as larger (double/quad/etc) registers which conflict
44 * with the corresponding registers in other classes.
45 *
46 * Additionally we create separate classes for half-regs, which
47 * do not conflict with the full-reg classes. We do need at least
48 * sizes 1-4 (to deal w/ texture sample instructions that output to
49 * half-regs). At the moment we don't create the higher order half-reg
50 * classes, as half-regs frequently do not have enough precision
51 * for texture coords at higher resolutions.
52 *
53 * There are some additional cases that we need to handle specially,
54 * as the graph coloring algo doesn't understand "partial writes".
55 * For example, a sequence like:
56 *
57 * add r0.z, ...
58 * sam (f32)(xy)r0.x, ...
59 * ...
60 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
61 *
62 * In this scenario, we treat r0.xyz as class size 3, which is written
63 * (from a use/def perspective) at the 'add' instruction and ignore the
64 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
65 * defining instruction, as it is the first to partially write r0.xyz.
66 *
67 * Note i965 has a similar scenario, which they solve with a virtual
68 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
69 * register assignment. But for us that is horrible from a scheduling
70 * standpoint. Instead what we do is use the idea of a 'definer' instruction.
71 * Ie. the first instruction (lowest ip) to write to the variable is the
72 * one we consider from use/def perspective when building interference
73 * graph. (Other instructions which write other variable components
74 * just define the variable some more.)
75 *
76 * Arrays of arbitrary size are handled via pre-coloring a consecutive
77 * sequence of registers. Additional scalar (single component) reg
78 * names are allocated starting at ctx->class_base[total_class_count]
79 * (see arr->base), which are pre-colored. In the use/def graph direct
80 * access is treated as a single element use/def, and indirect access
81 * is treated as use or def of all array elements. (Only the first
82 * def is tracked, in case of multiple indirect writes, etc.)
83 *
84 * TODO arrays that fit in one of the pre-defined class sizes should
85 * not need to be pre-colored, but instead could be given a normal
86 * vreg name. (Ignoring this for now since it is a good way to work
87 * out the kinks with arbitrary sized arrays.)
88 *
89 * TODO might be easier for debugging to split this into two passes,
90 * the first assigning vreg names in a way that we could ir3_print()
91 * the result.
92 */
93
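/* Worked example (a sketch, using the class_sizes[] table below): in
 * the add/sam sequence above, the definer of r0.xyz is the 'add', with
 * sz = 3.  size_to_class(3, false, false) returns index 2 (the first
 * entry with class_sizes[i] >= 3), so the whole xyz group becomes a
 * single RA node of that class; the later partial write by the first
 * sam and the coord read by the second sam are just additional def/use
 * events on that same node.
 */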
94 static const unsigned class_sizes[] = {
95 1, 2, 3, 4,
96 4 + 4, /* txd + 1d/2d */
97 4 + 6, /* txd + 3d */
98 };
99 #define class_count ARRAY_SIZE(class_sizes)
100
101 static const unsigned half_class_sizes[] = {
102 1, 2, 3, 4,
103 };
104 #define half_class_count ARRAY_SIZE(half_class_sizes)
105
106 /* high regs seem to just be used for compute shaders? Seems like vec1 and vec3
107 * are sufficient (for now?)
108 */
109 static const unsigned high_class_sizes[] = {
110 1, 3,
111 };
112 #define high_class_count ARRAY_SIZE(high_class_sizes)
113
114 #define total_class_count (class_count + half_class_count + high_class_count)
115
116 /* Below a0.x are normal regs. RA doesn't need to assign a0.x/p0.x. */
117 #define NUM_REGS (4 * 48) /* r0 to r47 */
118 #define NUM_HIGH_REGS (4 * 8) /* r48 to r55 */
119 #define FIRST_HIGH_REG (4 * 48)
120 /* Number of virtual regs in a given class: */
121 #define CLASS_REGS(i) (NUM_REGS - (class_sizes[i] - 1))
122 #define HALF_CLASS_REGS(i) (NUM_REGS - (half_class_sizes[i] - 1))
123 #define HIGH_CLASS_REGS(i) (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
124
125 #define HALF_OFFSET (class_count)
126 #define HIGH_OFFSET (class_count + half_class_count)
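/* With the tables above, the flat class index space has
 * total_class_count = 6 + 4 + 2 = 12 entries:
 *
 *   0..5    full classes  (sizes 1, 2, 3, 4, 8, 10)
 *   6..9    half classes  (sizes 1, 2, 3, 4),  HALF_OFFSET = 6
 *   10..11  high classes  (sizes 1, 3),        HIGH_OFFSET = 10
 *
 * (plus one extra slot used later in the per-shader context for array
 * elements)
 */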
127
128 /* register-set, created one time, used for all shaders: */
129 struct ir3_ra_reg_set {
130 struct ra_regs *regs;
131 unsigned int classes[class_count];
132 unsigned int half_classes[half_class_count];
133 unsigned int high_classes[high_class_count];
134 /* maps flat virtual register space to base gpr: */
135 uint16_t *ra_reg_to_gpr;
136 /* maps cls,gpr to flat virtual register space: */
137 uint16_t **gpr_to_ra_reg;
138 };
139
140 static void
141 build_q_values(unsigned int **q_values, unsigned off,
142 const unsigned *sizes, unsigned count)
143 {
144 for (unsigned i = 0; i < count; i++) {
145 q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);
146
147 /* From register_allocate.c:
148 *
149 * q(B,C) (indexed by C, B is this register class) in
150 * Runeson/Nyström paper. This is "how many registers of B could
151 * the worst choice register from C conflict with".
152 *
153 * If we just let the register allocation algorithm compute these
154 * values, it is extremely expensive. However, since all of our
155 * registers are laid out, we can very easily compute them
156 * ourselves. View the register from C as fixed starting at GRF n
157 * somewhere in the middle, and the register from B as sliding back
158 * and forth. Then the first register to conflict from B is the
159 * one starting at n - class_size[B] + 1 and the last register to
160 * conflict will start at n + class_size[B] - 1. Therefore, the
161 * number of conflicts from B is class_size[B] + class_size[C] - 1.
162 *
163 *        +-+-+-+-+-+-+     +-+-+-+-+-+-+
164 *   B    | | | | | |n| --> | | | | | | |
165 *        +-+-+-+-+-+-+     +-+-+-+-+-+-+
166 *                  +-+-+-+-+-+
167 *   C              |n| | | | |
168 *                  +-+-+-+-+-+
169 *
170 * (Idea copied from brw_fs_reg_allocate.cpp)
171 */
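/* For example, a vec2 class B against a vec3 class C gives
 * q(B,C) = 2 + 3 - 1 = 4, ie. four vec2 registers overlap any
 * given vec3 register.
 */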
172 for (unsigned j = 0; j < count; j++)
173 q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
174 }
175 }
176
177 /* One-time setup of RA register-set, which describes all the possible
178 * "virtual" registers and their interferences. Ie. double register
179 * occupies (and conflicts with) two single registers, and so forth.
180 * Since registers do not need to be aligned to their class size, they
181 * can conflict with other registers in the same class too. Ie:
182 *
183 *    Single (base) |  Double
184 *    --------------+---------------
185 *       R0         |  D0
186 *       R1         |  D0 D1
187 *       R2         |     D1 D2
188 *       R3         |        D2
189 *           .. and so on..
190 *
191 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
192 * really just four scalar registers. Don't let that confuse you.)
193 */
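/* Concretely (a sketch of what the loops below build): the vec2
 * register D1 has base gpr 1, so it gets explicit conflicts with the
 * scalar base regs R1 and R2 via ra_add_transitive_reg_conflict();
 * transitivity then also records the D0/D1 and D1/D2 conflicts shown
 * in the table above.
 */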
194 struct ir3_ra_reg_set *
195 ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
196 {
197 struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
198 unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
199 unsigned int **q_values;
200
201 /* calculate # of regs across all classes: */
202 ra_reg_count = 0;
203 for (unsigned i = 0; i < class_count; i++)
204 ra_reg_count += CLASS_REGS(i);
205 for (unsigned i = 0; i < half_class_count; i++)
206 ra_reg_count += HALF_CLASS_REGS(i);
207 for (unsigned i = 0; i < high_class_count; i++)
208 ra_reg_count += HIGH_CLASS_REGS(i);
209
210 /* allocate and populate q_values: */
211 q_values = ralloc_array(set, unsigned *, total_class_count);
212
213 build_q_values(q_values, 0, class_sizes, class_count);
214 build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
215 build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);
216
217 /* allocate the reg-set.. */
218 set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
219 set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
220 set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);
221
222 /* .. and classes */
223 reg = 0;
224 for (unsigned i = 0; i < class_count; i++) {
225 set->classes[i] = ra_alloc_reg_class(set->regs);
226
227 set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));
228
229 for (unsigned j = 0; j < CLASS_REGS(i); j++) {
230 ra_class_add_reg(set->regs, set->classes[i], reg);
231
232 set->ra_reg_to_gpr[reg] = j;
233 set->gpr_to_ra_reg[i][j] = reg;
234
235 for (unsigned br = j; br < j + class_sizes[i]; br++)
236 ra_add_transitive_reg_conflict(set->regs, br, reg);
237
238 reg++;
239 }
240 }
241
242 first_half_reg = reg;
243 base = HALF_OFFSET;
244
245 for (unsigned i = 0; i < half_class_count; i++) {
246 set->half_classes[i] = ra_alloc_reg_class(set->regs);
247
248 set->gpr_to_ra_reg[base + i] =
249 ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));
250
251 for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
252 ra_class_add_reg(set->regs, set->half_classes[i], reg);
253
254 set->ra_reg_to_gpr[reg] = j;
255 set->gpr_to_ra_reg[base + i][j] = reg;
256
257 for (unsigned br = j; br < j + half_class_sizes[i]; br++)
258 ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);
259
260 reg++;
261 }
262 }
263
264 first_high_reg = reg;
265 base = HIGH_OFFSET;
266
267 for (unsigned i = 0; i < high_class_count; i++) {
268 set->high_classes[i] = ra_alloc_reg_class(set->regs);
269
270 set->gpr_to_ra_reg[base + i] =
271 ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));
272
273 for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
274 ra_class_add_reg(set->regs, set->high_classes[i], reg);
275
276 set->ra_reg_to_gpr[reg] = j;
277 set->gpr_to_ra_reg[base + i][j] = reg;
278
279 for (unsigned br = j; br < j + high_class_sizes[i]; br++)
280 ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);
281
282 reg++;
283 }
284 }
285
286 /* starting with a6xx, half precision regs conflict w/ full precision regs: */
287 if (compiler->gpu_id >= 600) {
288 /* because of transitivity, we can get away with just setting up
289 * conflicts between the first class of full and half regs:
290 */
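/* e.g. full reg r0.x (j = 0) is made to conflict with half regs
 * hr0.x and hr0.y, r0.y (j = 1) with hr0.z and hr0.w, and so on,
 * since on a6xx a full precision reg aliases two half precision
 * regs in the merged register file.
 */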
291 for (unsigned j = 0; j < CLASS_REGS(0) / 2; j++) {
292 unsigned freg = set->gpr_to_ra_reg[0][j];
293 unsigned hreg0 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 0];
294 unsigned hreg1 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 1];
295
296 ra_add_transitive_reg_conflict(set->regs, freg, hreg0);
297 ra_add_transitive_reg_conflict(set->regs, freg, hreg1);
298 }
299
300 // TODO also need to update q_values, but for now:
301 ra_set_finalize(set->regs, NULL);
302 } else {
303 ra_set_finalize(set->regs, q_values);
304 }
305
306 ralloc_free(q_values);
307
308 return set;
309 }
310
311 /* additional block-data (per-block) */
312 struct ir3_ra_block_data {
313 BITSET_WORD *def; /* variables defined before used in block */
314 BITSET_WORD *use; /* variables used before defined in block */
315 BITSET_WORD *livein; /* which defs reach entry point of block */
316 BITSET_WORD *liveout; /* which defs reach exit point of block */
317 };
318
319 /* additional instruction-data (per-instruction) */
320 struct ir3_ra_instr_data {
321 /* cached instruction 'definer' info: */
322 struct ir3_instruction *defn;
323 int off, sz, cls;
324 };
325
326 /* register-assign context, per-shader */
327 struct ir3_ra_ctx {
328 struct ir3 *ir;
329 enum shader_t type;
330 bool frag_face;
331
332 struct ir3_ra_reg_set *set;
333 struct ra_graph *g;
334 unsigned alloc_count;
335 /* one per class, plus one slot for arrays: */
336 unsigned class_alloc_count[total_class_count + 1];
337 unsigned class_base[total_class_count + 1];
338 unsigned instr_cnt;
339 unsigned *def, *use; /* def/use table */
340 struct ir3_ra_instr_data *instrd;
341 };
342
343 /* does it conflict? */
344 static inline bool
345 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
346 {
347 return !((a_start >= b_end) || (b_start >= a_end));
348 }
349
350 static bool
351 is_half(struct ir3_instruction *instr)
352 {
353 return !!(instr->regs[0]->flags & IR3_REG_HALF);
354 }
355
356 static bool
357 is_high(struct ir3_instruction *instr)
358 {
359 return !!(instr->regs[0]->flags & IR3_REG_HIGH);
360 }
361
362 static int
363 size_to_class(unsigned sz, bool half, bool high)
364 {
365 if (high) {
366 for (unsigned i = 0; i < high_class_count; i++)
367 if (high_class_sizes[i] >= sz)
368 return i + HIGH_OFFSET;
369 } else if (half) {
370 for (unsigned i = 0; i < half_class_count; i++)
371 if (half_class_sizes[i] >= sz)
372 return i + HALF_OFFSET;
373 } else {
374 for (unsigned i = 0; i < class_count; i++)
375 if (class_sizes[i] >= sz)
376 return i;
377 }
378 debug_assert(0);
379 return -1;
380 }
381
382 static bool
383 writes_gpr(struct ir3_instruction *instr)
384 {
385 if (is_store(instr))
386 return false;
387 /* is dest a normal temp register: */
388 struct ir3_register *reg = instr->regs[0];
389 if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
390 return false;
391 if ((reg->num == regid(REG_A0, 0)) ||
392 (reg->num == regid(REG_P0, 0)))
393 return false;
394 return true;
395 }
396
397 static bool
398 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
399 {
400 if (a->flags & IR3_INSTR_UNUSED)
401 return false;
402 return (a->ip < b->ip);
403 }
404
405 static struct ir3_instruction *
406 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
407 int *sz, int *off)
408 {
409 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
410 struct ir3_instruction *d = NULL;
411
412 if (id->defn) {
413 *sz = id->sz;
414 *off = id->off;
415 return id->defn;
416 }
417
418 if (instr->opc == OPC_META_FI) {
419 /* What about the case where the collect is a subset of an array? We
420 * would need to find the distance between where the actual array starts
421 * and the fanin.. that probably doesn't happen currently.
422 */
423 struct ir3_register *src;
424 int dsz, doff;
425
426 /* note: don't use foreach_ssa_src as this gets called once
427 * while assigning regs (which clears SSA flag)
428 */
429 foreach_src_n(src, n, instr) {
430 struct ir3_instruction *dd;
431 if (!src->instr)
432 continue;
433
434 dd = get_definer(ctx, src->instr, &dsz, &doff);
435
436 if ((!d) || instr_before(dd, d)) {
437 d = dd;
438 *sz = dsz;
439 *off = doff - n;
440 }
441 }
442
443 } else if (instr->cp.right || instr->cp.left) {
444 /* covers also the meta:fo case, which ends up w/ single
445 * scalar instructions for each component:
446 */
447 struct ir3_instruction *f = ir3_neighbor_first(instr);
448
449 /* by definition, the entire sequence forms one linked list
450 * of single scalar register nodes (even if some of them may
451 * be fanouts from, for example, a texture sample instr). We
452 * just need to walk the list to find the first-defined element
453 * of the group (lowest ip).
454 */
455 int cnt = 0;
456
457 /* need to skip over unused instrs in the group: */
458 while (f && (f->flags & IR3_INSTR_UNUSED)) {
459 f = f->cp.right;
460 cnt++;
461 }
462
463 while (f) {
464 if ((!d) || instr_before(f, d))
465 d = f;
466 if (f == instr)
467 *off = cnt;
468 f = f->cp.right;
469 cnt++;
470 }
471
472 *sz = cnt;
473
474 } else {
475 /* second case is looking directly at the instruction which
476 * produces multiple values (eg, texture sample), rather
477 * than the fanout nodes that point back to that instruction.
478 * This isn't quite right, because it may be part of a larger
479 * group, such as:
480 *
481 * sam (f32)(xyzw)r0.x, ...
482 * add r1.x, ...
483 * add r1.y, ...
484 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
485 *
486 * need to come up with a better way to handle that case.
487 */
488 if (instr->address) {
489 *sz = instr->regs[0]->size;
490 } else {
491 *sz = util_last_bit(instr->regs[0]->wrmask);
492 }
493 *off = 0;
494 d = instr;
495 }
496
497 if (d->opc == OPC_META_FO) {
498 struct ir3_instruction *dd;
499 int dsz, doff;
500
501 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
502
503 /* by definition, should come before: */
504 debug_assert(instr_before(dd, d));
505
506 *sz = MAX2(*sz, dsz);
507
508 debug_assert(instr->opc == OPC_META_FO);
509 *off = MAX2(*off, instr->fo.off);
510
511 d = dd;
512 }
513
514 id->defn = d;
515 id->sz = *sz;
516 id->off = *off;
517
518 return d;
519 }
520
521 static void
522 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
523 {
524 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
525 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
526 if (instr->regs_count == 0)
527 continue;
528 /* couple special cases: */
529 if (writes_addr(instr) || writes_pred(instr)) {
530 id->cls = -1;
531 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
532 id->cls = total_class_count;
533 } else {
534 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
535 id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
536 }
537 }
538 }
539
540 /* give each instruction a name (and ip), and count up the # of names
541 * of each class
542 */
543 static void
544 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
545 {
546 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
547 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
548
549 #ifdef DEBUG
550 instr->name = ~0;
551 #endif
552
553 ctx->instr_cnt++;
554
555 if (instr->regs_count == 0)
556 continue;
557
558 if (!writes_gpr(instr))
559 continue;
560
561 if (id->defn != instr)
562 continue;
563
564 /* arrays which don't fit in one of the pre-defined class
565 * sizes are pre-colored:
566 */
567 if ((id->cls >= 0) && (id->cls < total_class_count)) {
568 instr->name = ctx->class_alloc_count[id->cls]++;
569 ctx->alloc_count++;
570 }
571 }
572 }
573
574 static void
575 ra_init(struct ir3_ra_ctx *ctx)
576 {
577 unsigned n, base;
578
579 ir3_clear_mark(ctx->ir);
580 n = ir3_count_instructions(ctx->ir);
581
582 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
583
584 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
585 ra_block_find_definers(ctx, block);
586 }
587
588 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
589 ra_block_name_instructions(ctx, block);
590 }
591
592 /* figure out the base register name for each class. The
593 * actual ra name is class_base[cls] + instr->name;
594 */
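/* e.g. if class 0 ended up with 5 names and class 1 with 3, then
 * class_base = { 0, 5, 8, ... } and a class 1 instruction with
 * name 2 gets ra name 5 + 2 = 7.
 */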
595 ctx->class_base[0] = 0;
596 for (unsigned i = 1; i <= total_class_count; i++) {
597 ctx->class_base[i] = ctx->class_base[i-1] +
598 ctx->class_alloc_count[i-1];
599 }
600
601 /* and vreg names for array elements: */
602 base = ctx->class_base[total_class_count];
603 list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
604 arr->base = base;
605 ctx->class_alloc_count[total_class_count] += arr->length;
606 base += arr->length;
607 }
608 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
609
610 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
611 ralloc_steal(ctx->g, ctx->instrd);
612 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
613 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
614 }
615
616 static unsigned
617 __ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
618 {
619 unsigned name;
620 debug_assert(cls >= 0);
621 debug_assert(cls < total_class_count); /* we shouldn't get arrays here.. */
622 name = ctx->class_base[cls] + defn->name;
623 debug_assert(name < ctx->alloc_count);
624 return name;
625 }
626
627 static int
628 ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
629 {
630 /* TODO handle name mapping for arrays */
631 return __ra_name(ctx, id->cls, id->defn);
632 }
633
634 static void
635 ra_destroy(struct ir3_ra_ctx *ctx)
636 {
637 ralloc_free(ctx->g);
638 }
639
640 static void
641 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
642 {
643 struct ir3_ra_block_data *bd;
644 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
645
646 #define def(name, instr) \
647 do { \
648 /* defined on first write: */ \
649 if (!ctx->def[name]) \
650 ctx->def[name] = instr->ip; \
651 ctx->use[name] = instr->ip; \
652 BITSET_SET(bd->def, name); \
653 } while(0);
654
655 #define use(name, instr) \
656 do { \
657 ctx->use[name] = MAX2(ctx->use[name], instr->ip); \
658 if (!BITSET_TEST(bd->def, name)) \
659 BITSET_SET(bd->use, name); \
660 } while(0);
661
662 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
663
664 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
665 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
666 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
667 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
668
669 block->data = bd;
670
671 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
672 struct ir3_instruction *src;
673 struct ir3_register *reg;
674
675 if (instr->regs_count == 0)
676 continue;
677
678 /* There are a couple special cases to deal with here:
679 *
680 * fanout: used to split values from a higher class to a lower
681 * class, for example split the results of a texture fetch
682 * into individual scalar values; We skip over these from
683 * a 'def' perspective, and for a 'use' we walk the chain
684 * up to the defining instruction.
685 *
686 * fanin: used to collect values from lower class and assemble
687 * them together into a higher class, for example arguments
688 * to texture sample instructions; We consider these to be
689 * defined at the earliest fanin source.
690 *
691 * Most of this is handled in the get_definer() helper.
692 *
693 * In either case, we trace the instruction back to the original
694 * definer and consider that as the def/use ip.
695 */
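/* A small example (sketch): for
 *
 *   sam (f32)(xyzw)r0.x, ...      ; writes a size-4 value
 *   <fanout/meta:fo per component>
 *   mul.f ..., <fanout component>, ...
 *
 * the mul's source is traced through the fanout back to the sam, so
 * both the def and the use are recorded against the sam's RA name.
 */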
696
697 if (writes_gpr(instr)) {
698 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
699 struct ir3_register *dst = instr->regs[0];
700
701 if (dst->flags & IR3_REG_ARRAY) {
702 struct ir3_array *arr =
703 ir3_lookup_array(ctx->ir, dst->array.id);
704 unsigned i;
705
706 arr->start_ip = MIN2(arr->start_ip, instr->ip);
707 arr->end_ip = MAX2(arr->end_ip, instr->ip);
708
709 /* set the node class now.. in case we don't encounter
710 * this array dst again. From register_alloc algo's
711 * perspective, these are all single/scalar regs:
712 */
713 for (i = 0; i < arr->length; i++) {
714 unsigned name = arr->base + i;
715 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
716 }
717
718 /* indirect write is treated like a write to all array
719 * elements, since we don't know which one is actually
720 * written:
721 */
722 if (dst->flags & IR3_REG_RELATIV) {
723 for (i = 0; i < arr->length; i++) {
724 unsigned name = arr->base + i;
725 def(name, instr);
726 }
727 } else {
728 unsigned name = arr->base + dst->array.offset;
729 def(name, instr);
730 }
731
732 } else if (id->defn == instr) {
733 unsigned name = ra_name(ctx, id);
734
735 /* since we are in SSA at this point: */
736 debug_assert(!BITSET_TEST(bd->use, name));
737
738 def(name, id->defn);
739
740 if (is_high(id->defn)) {
741 ra_set_node_class(ctx->g, name,
742 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
743 } else if (is_half(id->defn)) {
744 ra_set_node_class(ctx->g, name,
745 ctx->set->half_classes[id->cls - HALF_OFFSET]);
746 } else {
747 ra_set_node_class(ctx->g, name,
748 ctx->set->classes[id->cls]);
749 }
750 }
751 }
752
753 foreach_src(reg, instr) {
754 if (reg->flags & IR3_REG_ARRAY) {
755 struct ir3_array *arr =
756 ir3_lookup_array(ctx->ir, reg->array.id);
757 arr->start_ip = MIN2(arr->start_ip, instr->ip);
758 arr->end_ip = MAX2(arr->end_ip, instr->ip);
759
760 /* indirect read is treated like a read from all array
761 * elements, since we don't know which one is actually
762 * read:
763 */
764 if (reg->flags & IR3_REG_RELATIV) {
765 unsigned i;
766 for (i = 0; i < arr->length; i++) {
767 unsigned name = arr->base + i;
768 use(name, instr);
769 }
770 } else {
771 unsigned name = arr->base + reg->array.offset;
772 use(name, instr);
773 /* NOTE: arrays are not SSA so unconditionally
774 * set use bit:
775 */
776 BITSET_SET(bd->use, name);
777 debug_assert(reg->array.offset < arr->length);
778 }
779 } else if ((src = ssa(reg)) && writes_gpr(src)) {
780 unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
781 use(name, instr);
782 }
783 }
784 }
785 }
786
787 static bool
788 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
789 {
790 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
791 bool progress = false;
792
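/* Standard backwards liveness dataflow:
 *
 *   livein  = use | (liveout & ~def)
 *   liveout = union of livein over all successors
 *
 * The caller iterates this until no block's sets change (fixed point).
 */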
793 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
794 struct ir3_ra_block_data *bd = block->data;
795
796 /* update livein: */
797 for (unsigned i = 0; i < bitset_words; i++) {
798 BITSET_WORD new_livein =
799 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
800
801 if (new_livein & ~bd->livein[i]) {
802 bd->livein[i] |= new_livein;
803 progress = true;
804 }
805 }
806
807 /* update liveout: */
808 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
809 struct ir3_block *succ = block->successors[j];
810 struct ir3_ra_block_data *succ_bd;
811
812 if (!succ)
813 continue;
814
815 succ_bd = succ->data;
816
817 for (unsigned i = 0; i < bitset_words; i++) {
818 BITSET_WORD new_liveout =
819 (succ_bd->livein[i] & ~bd->liveout[i]);
820
821 if (new_liveout) {
822 bd->liveout[i] |= new_liveout;
823 progress = true;
824 }
825 }
826 }
827 }
828
829 return progress;
830 }
831
832 static void
833 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
834 {
835 bool first = true;
836 debug_printf(" %s:", name);
837 for (unsigned i = 0; i < cnt; i++) {
838 if (BITSET_TEST(bs, i)) {
839 if (!first)
840 debug_printf(",");
841 debug_printf(" %04u", i);
842 first = false;
843 }
844 }
845 debug_printf("\n");
846 }
847
848 static void
849 ra_add_interference(struct ir3_ra_ctx *ctx)
850 {
851 struct ir3 *ir = ctx->ir;
852
853 /* initialize array live ranges: */
854 list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
855 arr->start_ip = ~0;
856 arr->end_ip = 0;
857 }
858
859 /* compute live ranges (use/def) on a block level, also updating
860 * block's def/use bitmasks (used below to calculate per-block
861 * livein/liveout):
862 */
863 list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
864 ra_block_compute_live_ranges(ctx, block);
865 }
866
867 /* update per-block livein/liveout: */
868 while (ra_compute_livein_liveout(ctx)) {}
869
870 if (fd_mesa_debug & FD_DBG_OPTMSGS) {
871 debug_printf("AFTER LIVEIN/OUT:\n");
872 ir3_print(ir);
873 list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
874 struct ir3_ra_block_data *bd = block->data;
875 debug_printf("block%u:\n", block_id(block));
876 print_bitset(" def", bd->def, ctx->alloc_count);
877 print_bitset(" use", bd->use, ctx->alloc_count);
878 print_bitset(" l/i", bd->livein, ctx->alloc_count);
879 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
880 }
881 list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
882 debug_printf("array%u:\n", arr->id);
883 debug_printf(" length: %u\n", arr->length);
884 debug_printf(" start_ip: %u\n", arr->start_ip);
885 debug_printf(" end_ip: %u\n", arr->end_ip);
886 }
887 }
888
889 /* extend start/end ranges based on livein/liveout info from cfg: */
890 list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
891 struct ir3_ra_block_data *bd = block->data;
892
893 for (unsigned i = 0; i < ctx->alloc_count; i++) {
894 if (BITSET_TEST(bd->livein, i)) {
895 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
896 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
897 }
898
899 if (BITSET_TEST(bd->liveout, i)) {
900 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
901 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
902 }
903 }
904
905 list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
906 for (unsigned i = 0; i < arr->length; i++) {
907 if (BITSET_TEST(bd->livein, i + arr->base)) {
908 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
909 }
910 if (BITSET_TEST(bd->liveout, i + arr->base)) {
911 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
912 }
913 }
914 }
915 }
916
917 /* need to fix things up to keep outputs live: */
918 for (unsigned i = 0; i < ir->noutputs; i++) {
919 struct ir3_instruction *instr = ir->outputs[i];
920 unsigned name = ra_name(ctx, &ctx->instrd[instr->ip]);
921 ctx->use[name] = ctx->instr_cnt;
922 }
923
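/* Finally, two names interfere if their [def ip, use ip] ranges
 * overlap, e.g. a value live over ips [3,10] and one live over
 * [8,12] cannot share a register:
 */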
924 for (unsigned i = 0; i < ctx->alloc_count; i++) {
925 for (unsigned j = 0; j < ctx->alloc_count; j++) {
926 if (intersects(ctx->def[i], ctx->use[i],
927 ctx->def[j], ctx->use[j])) {
928 ra_add_node_interference(ctx->g, i, j);
929 }
930 }
931 }
932 }
933
934 /* some instructions need fix-up if dst register is half precision: */
935 static void fixup_half_instr_dst(struct ir3_instruction *instr)
936 {
937 switch (opc_cat(instr->opc)) {
938 case 1: /* move instructions */
939 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
940 break;
941 case 3:
942 switch (instr->opc) {
943 case OPC_MAD_F32:
944 instr->opc = OPC_MAD_F16;
945 break;
946 case OPC_SEL_B32:
947 instr->opc = OPC_SEL_B16;
948 break;
949 case OPC_SEL_S32:
950 instr->opc = OPC_SEL_S16;
951 break;
952 case OPC_SEL_F32:
953 instr->opc = OPC_SEL_F16;
954 break;
955 case OPC_SAD_S32:
956 instr->opc = OPC_SAD_S16;
957 break;
958 /* instructions may already be fixed up: */
959 case OPC_MAD_F16:
960 case OPC_SEL_B16:
961 case OPC_SEL_S16:
962 case OPC_SEL_F16:
963 case OPC_SAD_S16:
964 break;
965 default:
966 assert(0);
967 break;
968 }
969 break;
970 case 5:
971 instr->cat5.type = half_type(instr->cat5.type);
972 break;
973 }
974 }
975 /* some instructions need fix-up if src register is half precision: */
976 static void fixup_half_instr_src(struct ir3_instruction *instr)
977 {
978 switch (instr->opc) {
979 case OPC_MOV:
980 instr->cat1.src_type = half_type(instr->cat1.src_type);
981 break;
982 default:
983 break;
984 }
985 }
986
987 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
988 * array access(es) which do not have any previous access to depend
989 * on from a scheduling point of view.
990 */
991 static void
992 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
993 struct ir3_instruction *instr)
994 {
995 struct ir3_ra_instr_data *id;
996
997 if (reg->flags & IR3_REG_ARRAY) {
998 struct ir3_array *arr =
999 ir3_lookup_array(ctx->ir, reg->array.id);
1000 unsigned name = arr->base + reg->array.offset;
1001 unsigned r = ra_get_node_reg(ctx->g, name);
1002 unsigned num = ctx->set->ra_reg_to_gpr[r];
1003
1004 if (reg->flags & IR3_REG_RELATIV) {
1005 reg->array.offset = num;
1006 } else {
1007 reg->num = num;
1008 reg->flags &= ~IR3_REG_SSA;
1009 }
1010
1011 reg->flags &= ~IR3_REG_ARRAY;
1012 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1013 unsigned name = ra_name(ctx, id);
1014 unsigned r = ra_get_node_reg(ctx->g, name);
1015 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1016
1017 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1018
1019 if (is_high(id->defn))
1020 num += FIRST_HIGH_REG;
1021
1022 reg->num = num;
1023 reg->flags &= ~IR3_REG_SSA;
1024
1025 if (is_half(id->defn))
1026 reg->flags |= IR3_REG_HALF;
1027 }
1028 }
1029
1030 static void
1031 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1032 {
1033 list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
1034 struct ir3_register *reg;
1035
1036 if (instr->regs_count == 0)
1037 continue;
1038
1039 if (writes_gpr(instr)) {
1040 reg_assign(ctx, instr->regs[0], instr);
1041 if (instr->regs[0]->flags & IR3_REG_HALF)
1042 fixup_half_instr_dst(instr);
1043 }
1044
1045 foreach_src_n(reg, n, instr) {
1046 struct ir3_instruction *src = reg->instr;
1047 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1048 if (!(src || (reg->flags & IR3_REG_ARRAY)))
1049 continue;
1050 reg_assign(ctx, instr->regs[n+1], src);
1051 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1052 fixup_half_instr_src(instr);
1053 }
1054 }
1055 }
1056
1057 static int
1058 ra_alloc(struct ir3_ra_ctx *ctx)
1059 {
1060 /* pre-assign array elements:
1061 */
1062 list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
1063 unsigned base = 0;
1064
1065 if (arr->end_ip == 0)
1066 continue;
1067
1068 /* figure out what else we conflict with which has already
1069 * been assigned:
1070 */
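/* e.g. if arr2 was already assigned regs [4, 4 + arr2->length) and
 * both arrays are live at overlapping ips, bump base past arr2 and
 * rescan, since an earlier arr2 might now conflict with the new base
 * range.
 */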
1071 retry:
1072 list_for_each_entry (struct ir3_array, arr2, &ctx->ir->array_list, node) {
1073 if (arr2 == arr)
1074 break;
1075 if (arr2->end_ip == 0)
1076 continue;
1077 /* if it intersects with liverange AND register range.. */
1078 if (intersects(arr->start_ip, arr->end_ip,
1079 arr2->start_ip, arr2->end_ip) &&
1080 intersects(base, base + arr->length,
1081 arr2->reg, arr2->reg + arr2->length)) {
1082 base = MAX2(base, arr2->reg + arr2->length);
1083 goto retry;
1084 }
1085 }
1086
1087 arr->reg = base;
1088
1089 for (unsigned i = 0; i < arr->length; i++) {
1090 unsigned name, reg;
1091
1092 name = arr->base + i;
1093 reg = ctx->set->gpr_to_ra_reg[0][base++];
1094
1095 ra_set_node_reg(ctx->g, name, reg);
1096 }
1097 }
1098
1099 if (!ra_allocate(ctx->g))
1100 return -1;
1101
1102 list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
1103 ra_block_alloc(ctx, block);
1104 }
1105
1106 return 0;
1107 }
1108
1109 int ir3_ra(struct ir3 *ir, enum shader_t type,
1110 bool frag_coord, bool frag_face)
1111 {
1112 struct ir3_ra_ctx ctx = {
1113 .ir = ir,
1114 .type = type,
1115 .frag_face = frag_face,
1116 .set = ir->compiler->set,
1117 };
1118 int ret;
1119
1120 ra_init(&ctx);
1121 ra_add_interference(&ctx);
1122 ret = ra_alloc(&ctx);
1123 ra_destroy(&ctx);
1124
1125 return ret;
1126 }