nvc0: import nvc0 gallium driver
[mesa.git] / src / gallium / drivers / nvc0 / nvc0_pc_regalloc.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define NOUVEAU_DEBUG 1

/* #define NVC0_RA_DEBUG_LIVEI */
/* #define NVC0_RA_DEBUG_LIVE_SETS */
/* #define NVC0_RA_DEBUG_JOIN */

#include "nvc0_pc.h"
#include "util/u_simple_list.h"

#define NVC0_NUM_REGISTER_FILES 3

/* @log2_unit: log2 of the minimum allocation unit for a register file */
struct register_set {
   uint32_t bits[NVC0_NUM_REGISTER_FILES][2];
   uint32_t last[NVC0_NUM_REGISTER_FILES];
   int log2_unit[NVC0_NUM_REGISTER_FILES];
   struct nv_pc *pc;
};

struct nv_pc_pass {
   struct nv_pc *pc;
   struct nv_instruction **insns;
   uint num_insns;
   uint pass_seq;
};

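/* Live intervals are kept per value (val->livei) as an ordered list of
 * disjoint, half-open [bgn, end) ranges; ranges_coalesce merges a range
 * with any successors it has grown into.
 */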
static void
ranges_coalesce(struct nv_range *range)
{
   while (range->next && range->end >= range->next->bgn) {
      struct nv_range *rnn = range->next->next;
      assert(range->bgn <= range->next->bgn);
      range->end = MAX2(range->end, range->next->end);
      FREE(range->next);
      range->next = rnn;
   }
}

static boolean
add_range_ex(struct nv_value *val, int bgn, int end, struct nv_range *new_range)
{
   struct nv_range *range, **nextp = &val->livei;

   for (range = val->livei; range; range = range->next) {
      if (end < range->bgn)
         break; /* insert before */

      if (bgn > range->end) {
         nextp = &range->next;
         continue; /* insert after */
      }

      /* overlap */
      if (bgn < range->bgn) {
         range->bgn = bgn;
         if (end > range->end)
            range->end = end;
         ranges_coalesce(range);
         return TRUE;
      }
      if (end > range->end) {
         range->end = end;
         ranges_coalesce(range);
         return TRUE;
      }
      assert(bgn >= range->bgn);
      assert(end <= range->end);
      return TRUE;
   }

   if (!new_range)
      new_range = CALLOC_STRUCT(nv_range);

   new_range->bgn = bgn;
   new_range->end = end;
   new_range->next = range;
   *(nextp) = new_range;
   return FALSE;
}

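/* Add [bgn, end) to the live interval of @val within basic block @b; if the
 * defining instruction lies outside of @b (a live-through value), the range
 * is extended back to the block entry.
 */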
static void
add_range(struct nv_value *val, struct nv_basic_block *b, int end)
{
   int bgn;

   if (!val->insn) /* ignore non-def values */
      return;
   assert(b->entry->serial <= b->exit->serial);
   assert(b->phi->serial <= end);
   assert(b->exit->serial + 1 >= end);

   bgn = val->insn->serial;
   if (bgn < b->entry->serial || bgn > b->exit->serial)
      bgn = b->entry->serial;

   assert(bgn <= end);

   add_range_ex(val, bgn, end, NULL);
}

#if defined(NVC0_RA_DEBUG_JOIN) || defined(NVC0_RA_DEBUG_LIVEI)
static void
livei_print(struct nv_value *a)
{
   struct nv_range *r = a->livei;

   debug_printf("livei %i: ", a->n);
   while (r) {
      debug_printf("[%i, %i) ", r->bgn, r->end);
      r = r->next;
   }
   debug_printf("\n");
}
#endif

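/* Merge the live interval of @src into @dst; range structs of @src are
 * re-used where possible (add_range_ex returns TRUE if it merged into an
 * existing range of @dst, in which case the struct is freed here).
 */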
static void
livei_unify(struct nv_value *dst, struct nv_value *src)
{
   struct nv_range *range, *next;

   for (range = src->livei; range; range = next) {
      next = range->next;
      if (add_range_ex(dst, range->bgn, range->end, range))
         FREE(range);
   }
   src->livei = NULL;
}

static void
livei_release(struct nv_value *val)
{
   struct nv_range *range, *next;

   for (range = val->livei; range; range = next) {
      next = range->next;
      FREE(range);
   }
}

static boolean
livei_have_overlap(struct nv_value *a, struct nv_value *b)
{
   struct nv_range *r_a, *r_b;

   for (r_a = a->livei; r_a; r_a = r_a->next) {
      for (r_b = b->livei; r_b; r_b = r_b->next) {
         if (r_b->bgn < r_a->end &&
             r_b->end > r_a->bgn)
            return TRUE;
      }
   }
   return FALSE;
}

static int
livei_end(struct nv_value *a)
{
   struct nv_range *r = a->livei;

   assert(r);
   while (r->next)
      r = r->next;
   return r->end;
}

static boolean
livei_contains(struct nv_value *a, int pos)
{
   struct nv_range *r;

   for (r = a->livei; r && r->bgn <= pos; r = r->next)
      if (r->end > pos)
         return TRUE;
   return FALSE;
}

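/* Try to assign a contiguous, size-aligned run of register file units to the
 * @n defs of an instruction (3-component results are padded to 4 to keep the
 * run aligned). The allocation bitmasks hold one bit per minimum allocation
 * unit, i.e. per (1 << log2_unit) bytes.
 */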
static boolean
reg_assign(struct register_set *set, struct nv_value **def, int n)
{
   int i, id, s, k;
   uint32_t m;
   int f = def[0]->reg.file;

   k = n;
   if (k == 3)
      k = 4;
   s = (k * def[0]->reg.size) >> set->log2_unit[f];
   m = (1 << s) - 1;

   id = set->last[f];

   for (i = 0; i * 32 < set->last[f]; ++i) {
      if (set->bits[f][i] == 0xffffffff)
         continue;

      for (id = 0; id < 32; id += s)
         if (!(set->bits[f][i] & (m << id)))
            break;
      if (id < 32)
         break;
   }
   if (i * 32 + id > set->last[f])
      return FALSE;

   set->bits[f][i] |= m << id;

   id += i * 32;

   set->pc->max_reg[f] = MAX2(set->pc->max_reg[f], id + s - 1);

   for (i = 0; i < n; ++i)
      if (def[i]->livei)
         def[i]->reg.id = id++;

   return TRUE;
}

static INLINE void
reg_occupy(struct register_set *set, struct nv_value *val)
{
   int id = val->reg.id, f = val->reg.file;
   uint32_t m;

   if (id < 0)
      return;
   m = (1 << (val->reg.size >> set->log2_unit[f])) - 1;

   set->bits[f][id / 32] |= m << (id % 32);

   if (set->pc->max_reg[f] < id)
      set->pc->max_reg[f] = id;
}

static INLINE void
reg_release(struct register_set *set, struct nv_value *val)
{
   int id = val->reg.id, f = val->reg.file;
   uint32_t m;

   if (id < 0)
      return;
   m = (1 << (val->reg.size >> set->log2_unit[f])) - 1;

   set->bits[f][id / 32] &= ~(m << (id % 32));
}

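/* Coalescing: every value points to a representative via val->join.
 * Joining @a and @b is allowed if both live in the same register file with
 * the same size, and merging them would not overlap another value that has
 * already been assigned the same register.
 */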
static INLINE boolean
join_allowed(struct nv_pc_pass *ctx, struct nv_value *a, struct nv_value *b)
{
   int i;
   struct nv_value *val;

   if (a->reg.file != b->reg.file || a->reg.size != b->reg.size)
      return FALSE;

   if (a->join->reg.id == b->join->reg.id)
      return TRUE;

   /* either a or b or both have been assigned */

   if (a->join->reg.id >= 0 && b->join->reg.id >= 0)
      return FALSE;
   else
   if (b->join->reg.id >= 0) {
      if (b->join->reg.id == 63)
         return FALSE;
      val = a;
      a = b;
      b = val;
   } else
   if (a->join->reg.id == 63)
      return FALSE;

   for (i = 0; i < ctx->pc->num_values; ++i) {
      val = &ctx->pc->values[i];

      if (val->join->reg.id != a->join->reg.id)
         continue;
      if (val->join != a->join && livei_have_overlap(val->join, b->join))
         return FALSE;
   }
   return TRUE;
}

static INLINE void
do_join_values(struct nv_pc_pass *ctx, struct nv_value *a, struct nv_value *b)
{
   int j;
   struct nv_value *bjoin = b->join;

   if (b->join->reg.id >= 0)
      a->join->reg.id = b->join->reg.id;

   livei_unify(a->join, b->join);

#ifdef NVC0_RA_DEBUG_JOIN
   debug_printf("joining %i to %i\n", b->n, a->n);
#endif

   /* make a->join the new representative */
   for (j = 0; j < ctx->pc->num_values; ++j)
      if (ctx->pc->values[j].join == bjoin)
         ctx->pc->values[j].join = a->join;

   assert(b->join == a->join);
}

static INLINE void
try_join_values(struct nv_pc_pass *ctx, struct nv_value *a, struct nv_value *b)
{
   if (!join_allowed(ctx, a, b)) {
#ifdef NVC0_RA_DEBUG_JOIN
      debug_printf("cannot join %i to %i: not allowed\n", b->n, a->n);
#endif
      return;
   }
   if (livei_have_overlap(a->join, b->join)) {
#ifdef NVC0_RA_DEBUG_JOIN
      debug_printf("cannot join %i to %i: livei overlap\n", b->n, a->n);
      livei_print(a);
      livei_print(b);
#endif
      return;
   }

   do_join_values(ctx, a, b);
}

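/* If predecessor @p has two non-loop outgoing edges and @b has more than one
 * predecessor, the edge p->b is critical and has to be split, so that the
 * MOVs inserted for b's phi sources only execute on the path to @b.
 */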
static INLINE boolean
need_new_else_block(struct nv_basic_block *b, struct nv_basic_block *p)
{
   int i = 0, n = 0;

   for (; i < 2; ++i)
      if (p->out[i] && !IS_LOOP_EDGE(p->out_kind[i]))
         ++n;

   return (b->num_in > 1) && (n == 2);
}

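/* Select the operand of @phi (in block @tb) that corresponds to predecessor
 * block @b: the source whose definition reaches @b, preferring the definition
 * that lies furthest along the path towards @b if several qualify.
 * Returns the source index, or -1 if none matches.
 */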
static int
phi_opnd_for_bb(struct nv_instruction *phi, struct nv_basic_block *b,
                struct nv_basic_block *tb)
{
   int i, j;

   for (j = -1, i = 0; i < 6 && phi->src[i]; ++i) {
      if (!nvc0_bblock_reachable_by(b, phi->src[i]->value->insn->bb, tb))
         continue;
      /* NOTE: back-edges are ignored by the reachable-by check */
      if (j < 0 || !nvc0_bblock_reachable_by(phi->src[j]->value->insn->bb,
                                             phi->src[i]->value->insn->bb, tb))
         j = i;
   }
   return j;
}

/* For each operand of each PHI in b, generate a new value by inserting a MOV
 * at the end of the block it is coming from and replace the operand with its
 * result. This eliminates liveness conflicts and enables us to let values be
 * copied to the right register if such a conflict exists nonetheless.
 *
 * These MOVs are also crucial in making sure the live intervals of phi sources
 * are extended until the end of the loop, since they are not included in the
 * live-in sets.
 */
static int
pass_generate_phi_movs(struct nv_pc_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *i, *ni;
   struct nv_value *val;
   struct nv_basic_block *p, *pn;
   int n, j;

   b->pass_seq = ctx->pc->pass_seq;

   for (n = 0; n < b->num_in; ++n) {
      p = pn = b->in[n];
      assert(p);

      if (need_new_else_block(b, p)) {
         pn = new_basic_block(ctx->pc);

         if (p->out[0] == b)
            p->out[0] = pn;
         else
            p->out[1] = pn;

         if (p->exit->target == b) /* target to new else-block */
            p->exit->target = pn;

         b->in[n] = pn;

         pn->out[0] = b;
         pn->in[0] = p;
         pn->num_in = 1;
      }
      ctx->pc->current_block = pn;

      for (i = b->phi; i && i->opcode == NV_OP_PHI; i = i->next) {
         if ((j = phi_opnd_for_bb(i, p, b)) < 0)
            continue;
         val = i->src[j]->value;

         if (i->src[j]->flags) {
            /* value already encountered from a different in-block */
            val = val->insn->src[0]->value;
            while (j < 6 && i->src[j])
               ++j;
            assert(j < 6);
         }

         ni = new_instruction(ctx->pc, NV_OP_MOV);

         /* TODO: insert instruction at correct position in the first place */
         if (ni->prev && ni->prev->target)
            nvc0_insns_permute(ni->prev, ni);

         ni->def[0] = new_value_like(ctx->pc, val);
         ni->def[0]->insn = ni;
         nv_reference(ctx->pc, ni, 0, val);
         nv_reference(ctx->pc, i, j, ni->def[0]); /* new phi source = MOV def */
         i->src[j]->flags = 1;
      }

      if (pn != p && pn->exit) {
         ctx->pc->current_block = b->in[n ? 0 : 1];
         ni = new_instruction(ctx->pc, NV_OP_BRA);
         ni->target = b;
         ni->terminator = 1;
      }
   }

   for (j = 0; j < 2; ++j)
      if (b->out[j] && b->out[j]->pass_seq < ctx->pc->pass_seq)
         pass_generate_phi_movs(ctx, b->out[j]);

   return 0;
}

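/* Coalesce values in several rounds (@iter):
 *  0: join the sources of texture and BIND ops with their defs
 *     unconditionally (they have to share the defs' consecutive registers),
 *  1: join SELECT sources with the def (asserted to be legal),
 *  2: join PHI and MOV sources where file, size and live intervals allow it.
 */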
static int
pass_join_values(struct nv_pc_pass *ctx, int iter)
{
   int c, n;

   for (n = 0; n < ctx->num_insns; ++n) {
      struct nv_instruction *i = ctx->insns[n];

      switch (i->opcode) {
      case NV_OP_PHI:
         if (iter != 2)
            break;
         for (c = 0; c < 6 && i->src[c]; ++c)
            try_join_values(ctx, i->def[0], i->src[c]->value);
         break;
      case NV_OP_MOV:
         if ((iter == 2) && i->src[0]->value->insn &&
             !nv_is_texture_op(i->src[0]->value->join->insn->opcode))
            try_join_values(ctx, i->def[0], i->src[0]->value);
         break;
      case NV_OP_SELECT:
         if (iter != 1)
            break;
         for (c = 0; c < 6 && i->src[c]; ++c) {
            assert(join_allowed(ctx, i->def[0], i->src[c]->value));
            do_join_values(ctx, i->def[0], i->src[c]->value);
         }
         break;
      case NV_OP_TEX:
      case NV_OP_TXB:
      case NV_OP_TXL:
      case NV_OP_TXQ:
      case NV_OP_BIND:
         if (iter)
            break;
         for (c = 0; c < 6 && i->src[c]; ++c)
            do_join_values(ctx, i->def[c], i->src[c]->value);
         break;
      default:
         break;
      }
   }
   return 0;
}

/* Order the instructions so that live intervals can be expressed in numbers. */
static void
pass_order_instructions(void *priv, struct nv_basic_block *b)
{
   struct nv_pc_pass *ctx = (struct nv_pc_pass *)priv;
   struct nv_instruction *i;

   b->pass_seq = ctx->pc->pass_seq;

   assert(!b->exit || !b->exit->next);
   for (i = b->phi; i; i = i->next) {
      i->serial = ctx->num_insns;
      ctx->insns[ctx->num_insns++] = i;
   }
}

static void
bb_live_set_print(struct nv_pc *pc, struct nv_basic_block *b)
{
#ifdef NVC0_RA_DEBUG_LIVE_SETS
   struct nv_value *val;
   int j;

   debug_printf("LIVE-INs of BB:%i: ", b->id);

   for (j = 0; j < pc->num_values; ++j) {
      if (!(b->live_set[j / 32] & (1 << (j % 32))))
         continue;
      val = &pc->values[j];
      if (!val->insn)
         continue;
      debug_printf("%i ", val->n);
   }
   debug_printf("\n");
#endif
}

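/* Block live sets are bit vectors indexed by value number (val->n); only
 * values that have a defining instruction are tracked.
 */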
static INLINE void
live_set_add(struct nv_basic_block *b, struct nv_value *val)
{
   if (!val->insn) /* don't add non-def values */
      return;
   b->live_set[val->n / 32] |= 1 << (val->n % 32);
}

static INLINE void
live_set_rem(struct nv_basic_block *b, struct nv_value *val)
{
   b->live_set[val->n / 32] &= ~(1 << (val->n % 32));
}

static INLINE boolean
live_set_test(struct nv_basic_block *b, struct nv_ref *ref)
{
   int n = ref->value->n;
   return b->live_set[n / 32] & (1 << (n % 32));
}

/* The live set of a block contains those values that are live immediately
 * before the beginning of the block, so do a backwards scan.
 */
static int
pass_build_live_sets(struct nv_pc_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *i;
   int j, n, ret = 0;

   if (b->pass_seq >= ctx->pc->pass_seq)
      return 0;
   b->pass_seq = ctx->pc->pass_seq;

   /* slight hack for undecidedness: set phi = entry if it's undefined */
   if (!b->phi)
      b->phi = b->entry;

   for (n = 0; n < 2; ++n) {
      if (!b->out[n] || b->out[n] == b)
         continue;
      ret = pass_build_live_sets(ctx, b->out[n]);
      if (ret)
         return ret;

      if (n == 0) {
         for (j = 0; j < (ctx->pc->num_values + 31) / 32; ++j)
            b->live_set[j] = b->out[n]->live_set[j];
      } else {
         for (j = 0; j < (ctx->pc->num_values + 31) / 32; ++j)
            b->live_set[j] |= b->out[n]->live_set[j];
      }
   }

   if (!b->entry)
      return 0;

   bb_live_set_print(ctx->pc, b);

   for (i = b->exit; i != b->entry->prev; i = i->prev) {
      for (j = 0; j < 5 && i->def[j]; j++)
         live_set_rem(b, i->def[j]);
      for (j = 0; j < 6 && i->src[j]; j++)
         live_set_add(b, i->src[j]->value);
   }
   for (i = b->phi; i && i->opcode == NV_OP_PHI; i = i->next)
      live_set_rem(b, i->def[0]);

   bb_live_set_print(ctx->pc, b);

   return 0;
}

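/* Initialize the live-out set of @b from its successors: a value is live-out
 * of @b if it is live-in to either successor.
 */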
static void collect_live_values(struct nv_basic_block *b, const int n)
{
   int i;

   if (b->out[0]) {
      if (b->out[1]) { /* what to do about back-edges ? */
         for (i = 0; i < n; ++i)
            b->live_set[i] = b->out[0]->live_set[i] | b->out[1]->live_set[i];
      } else {
         memcpy(b->live_set, b->out[0]->live_set, n * sizeof(uint32_t));
      }
   } else
   if (b->out[1]) {
      memcpy(b->live_set, b->out[1]->live_set, n * sizeof(uint32_t));
   } else {
      memset(b->live_set, 0, n * sizeof(uint32_t));
   }
}

/* NOTE: the live intervals of phi functions start at the first non-phi insn. */
static int
pass_build_intervals(struct nv_pc_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *i, *i_stop;
   int j, s;
   const int n = (ctx->pc->num_values + 31) / 32;

   /* verify that first block does not have live-in values */
   if (b->num_in == 0)
      for (j = 0; j < n; ++j)
         assert(b->live_set[j] == 0);

   collect_live_values(b, n);

   /* remove live-outs def'd in a parallel block, hopefully they're all phi'd */
   for (j = 0; j < 2; ++j) {
      if (!b->out[j] || !b->out[j]->phi)
         continue;
      for (i = b->out[j]->phi; i->opcode == NV_OP_PHI; i = i->next) {
         live_set_rem(b, i->def[0]);

         for (s = 0; s < 6 && i->src[s]; ++s) {
            assert(i->src[s]->value->insn);
            if (nvc0_bblock_reachable_by(b, i->src[s]->value->insn->bb,
                                         b->out[j]))
               live_set_add(b, i->src[s]->value);
            else
               live_set_rem(b, i->src[s]->value);
         }
      }
   }

   /* remaining live-outs are live until the end */
   if (b->exit) {
      for (j = 0; j < ctx->pc->num_values; ++j) {
         if (!(b->live_set[j / 32] & (1 << (j % 32))))
            continue;
         add_range(&ctx->pc->values[j], b, b->exit->serial + 1);
#ifdef NVC0_RA_DEBUG_LIVEI
         debug_printf("adding range for live value %i: ", j);
         livei_print(&ctx->pc->values[j]);
#endif
      }
   }

   i_stop = b->entry ? b->entry->prev : NULL;

   /* don't have to include phi functions here (will have 0 live range) */
   for (i = b->exit; i != i_stop; i = i->prev) {
      assert(i->serial >= b->phi->serial && i->serial <= b->exit->serial);
      for (j = 0; j < 4 && i->def[j]; ++j)
         live_set_rem(b, i->def[j]);

      for (j = 0; j < 6 && i->src[j]; ++j) {
         if (!live_set_test(b, i->src[j])) {
            live_set_add(b, i->src[j]->value);
            add_range(i->src[j]->value, b, i->serial);
#ifdef NVC0_RA_DEBUG_LIVEI
            debug_printf("adding range for source %i (ends living): ",
                         i->src[j]->value->n);
            livei_print(i->src[j]->value);
#endif
         }
      }
   }

   b->pass_seq = ctx->pc->pass_seq;

   if (b->out[0] && b->out[0]->pass_seq < ctx->pc->pass_seq)
      pass_build_intervals(ctx, b->out[0]);

   if (b->out[1] && b->out[1]->pass_seq < ctx->pc->pass_seq)
      pass_build_intervals(ctx, b->out[1]);

   return 0;
}

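/* Set up the allocatable register files: GPRs r0-r62 (r63 is never handed
 * out here), 7 predicate and 2 condition registers. GPRs are allocated in
 * 32-bit units, predicate and condition registers individually.
 */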
static INLINE void
nvc0_ctor_register_set(struct nv_pc *pc, struct register_set *set)
{
   memset(set, 0, sizeof(*set));

   set->last[NV_FILE_GPR] = 62;
   set->last[NV_FILE_PRED] = 6;
   set->last[NV_FILE_COND] = 1;

   set->log2_unit[NV_FILE_GPR] = 2;
   set->log2_unit[NV_FILE_COND] = 0;
   set->log2_unit[NV_FILE_PRED] = 0;

   set->pc = pc;
}

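/* Insert @nval into the circular list @list, keeping the list sorted by
 * increasing live interval start; we scan from the tail since new values
 * tend to begin late.
 */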
static void
insert_ordered_tail(struct nv_value *list, struct nv_value *nval)
{
   struct nv_value *elem;

   for (elem = list->prev;
        elem != list && elem->livei->bgn > nval->livei->bgn;
        elem = elem->prev);
   /* now elem begins before or at the same time as nval */

   nval->prev = elem;
   nval->next = elem->next;
   elem->next->prev = nval;
   elem->next = nval;
}

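/* Linear scan over the live intervals of all def'd values, in order of
 * interval start: "active" holds values live at the current position,
 * "inactive" values currently in a lifetime hole, whose registers are only
 * blocked for intervals they actually overlap. Values that already have a
 * register (e.g. joined to a previously colored value) simply re-occupy it.
 */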
static int
pass_linear_scan(struct nv_pc_pass *ctx, int iter)
{
   struct nv_instruction *i;
   struct register_set f, free;
   int k, n;
   struct nv_value *cur, *val, *tmp[2];
   struct nv_value active, inactive, handled, unhandled;

   make_empty_list(&active);
   make_empty_list(&inactive);
   make_empty_list(&handled);
   make_empty_list(&unhandled);

   nvc0_ctor_register_set(ctx->pc, &free);

   /* joined values should have range = NULL and thus not be added;
    * also, fixed memory values won't be added because they're not
    * def'd, just used
    */
   for (n = 0; n < ctx->num_insns; ++n) {
      i = ctx->insns[n];

      for (k = 0; k < 5; ++k) {
         if (i->def[k] && i->def[k]->livei)
            insert_ordered_tail(&unhandled, i->def[k]);
         else
         if (0 && i->def[k])
            debug_printf("skipping def'd value %i: no livei\n", i->def[k]->n);
      }
   }

   for (val = unhandled.next; val != unhandled.prev; val = val->next) {
      assert(val->join == val);
      assert(val->livei->bgn <= val->next->livei->bgn);
   }

   foreach_s(cur, tmp[0], &unhandled) {
      remove_from_list(cur);

      foreach_s(val, tmp[1], &active) {
         if (livei_end(val) <= cur->livei->bgn) {
            reg_release(&free, val);
            move_to_head(&handled, val);
         } else
         if (!livei_contains(val, cur->livei->bgn)) {
            reg_release(&free, val);
            move_to_head(&inactive, val);
         }
      }

      foreach_s(val, tmp[1], &inactive) {
         if (livei_end(val) <= cur->livei->bgn)
            move_to_head(&handled, val);
         else
         if (livei_contains(val, cur->livei->bgn)) {
            reg_occupy(&free, val);
            move_to_head(&active, val);
         }
      }

      f = free;

      foreach(val, &inactive)
         if (livei_have_overlap(val, cur))
            reg_occupy(&f, val);

      foreach(val, &unhandled)
         if (val->reg.id >= 0 && livei_have_overlap(val, cur))
            reg_occupy(&f, val);

      if (cur->reg.id < 0) {
         boolean mem = FALSE;
         int v = nvi_vector_size(cur->insn);

         if (v > 1)
            mem = !reg_assign(&f, &cur->insn->def[0], v);
         else
         if (iter)
            mem = !reg_assign(&f, &cur, 1);

         if (mem) {
            NOUVEAU_ERR("out of registers\n");
            abort();
         }
      }
      insert_at_head(&active, cur);
      reg_occupy(&free, cur);
   }

   return 0;
}

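/* Register allocation for one CFG (main program or subroutine): split
 * critical edges and insert MOVs for phi sources, number the instructions,
 * compute live sets (iterated up to the loop nesting depth) and live
 * intervals, then coalesce and color in two linear scan rounds: vector
 * defs in the first round, the remaining values in the second.
 */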
static int
nv_pc_pass1(struct nv_pc *pc, struct nv_basic_block *root)
{
   struct nv_pc_pass *ctx;
   int i, ret;

   NOUVEAU_DBG("REGISTER ALLOCATION - entering\n");

   ctx = CALLOC_STRUCT(nv_pc_pass);
   if (!ctx)
      return -1;
   ctx->pc = pc;

   ctx->insns = CALLOC(NV_PC_MAX_INSTRUCTIONS, sizeof(struct nv_instruction *));
   if (!ctx->insns) {
      FREE(ctx);
      return -1;
   }

   pc->pass_seq++;
   ret = pass_generate_phi_movs(ctx, root);
   assert(!ret);

   for (i = 0; i < pc->loop_nesting_bound; ++i) {
      pc->pass_seq++;
      ret = pass_build_live_sets(ctx, root);
      assert(!ret && "live sets");
      if (ret) {
         NOUVEAU_ERR("failed to build live sets (iteration %d)\n", i);
         goto out;
      }
   }

   pc->pass_seq++;
   nvc0_pc_pass_in_order(root, pass_order_instructions, ctx);

   pc->pass_seq++;
   ret = pass_build_intervals(ctx, root);
   assert(!ret && "build intervals");
   if (ret) {
      NOUVEAU_ERR("failed to build live intervals\n");
      goto out;
   }

#ifdef NVC0_RA_DEBUG_LIVEI
   for (i = 0; i < pc->num_values; ++i)
      livei_print(&pc->values[i]);
#endif

   ret = pass_join_values(ctx, 0);
   if (ret)
      goto out;
   ret = pass_linear_scan(ctx, 0);
   if (ret)
      goto out;
   ret = pass_join_values(ctx, 1);
   if (ret)
      goto out;
   ret = pass_join_values(ctx, 2);
   if (ret)
      goto out;
   ret = pass_linear_scan(ctx, 1);
   if (ret)
      goto out;

   for (i = 0; i < pc->num_values; ++i)
      livei_release(&pc->values[i]);

   NOUVEAU_DBG("REGISTER ALLOCATION - leaving\n");

out:
   FREE(ctx->insns);
   FREE(ctx);
   return ret;
}

int
nvc0_pc_exec_pass1(struct nv_pc *pc)
{
   int i, ret;

   for (i = 0; i < pc->num_subroutines + 1; ++i)
      if (pc->root[i] && (ret = nv_pc_pass1(pc, pc->root[i])))
         return ret;
   return 0;
}