/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "For this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
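
/*
 * As a concrete illustration of the pq test (a hypothetical register file,
 * not taken from any particular driver): suppose four base registers r0-r3
 * and two aligned pair registers p0 = {r0,r1} and p1 = {r2,r3}, with class
 * B = {r0..r3} and class P = {p0,p1}.  Then:
 *
 *    p(B) = 4, p(P) = 2
 *    q(B,B) = 1   (within B, a base register conflicts only with itself)
 *    q(B,P) = 2   (a pair register overlaps two base registers)
 *    q(P,B) = 1   (a base register overlaps one pair register)
 *    q(P,P) = 1   (within P, a pair register conflicts only with itself)
 *
 * A class-B node with one class-P neighbor and one class-B neighbor is
 * trivially colorable, since q(B,P) + q(B,B) = 3 < p(B) = 4.  Adding a
 * second class-B neighbor brings the sum to 4, and the test no longer
 * guarantees a color.
 */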
#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "util/ralloc.h"
#include "main/macros.h"
#include "util/bitset.h"
#include "util/u_dynarray.h"
#include "register_allocate.h"

#define NO_REG ~0U
struct ra_reg {
   BITSET_WORD *conflicts;
   unsigned int *conflict_list;
   unsigned int conflict_list_size;
   unsigned int num_conflicts;
};
struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};
struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};
struct ra_node {
   /**
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;

   struct util_dynarray adjacency_list;

   unsigned int class;

   /* Client-assigned register, if assigned, or NO_REG. */
   unsigned int forced_reg;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      /**
       * Temporary version of q_total which we decrement as things are placed
       * into the stack.
       */
      unsigned int q_total;
   } tmp;
};
struct ra_graph {
   struct ra_regs *regs;

   /**
    * The nodes in the graph: the variables that need register allocation.
    */
   struct ra_node *nodes;

   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   ra_select_reg_callback select_reg_callback;
   void *select_reg_callback_data;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      unsigned int *stack;
      unsigned int stack_count;

      /** Bit-set indicating, for each node, if it's in the stack. */
      BITSET_WORD *in_stack;

      /** Bit-set indicating, for each node, if it's pre-assigned. */
      BITSET_WORD *reg_assigned;

      /** Bit-set indicating, for each node, the value of the pq test. */
      BITSET_WORD *pq_test;

      /** For each BITSET_WORD, the minimum q value, or ~0 if unknown. */
      unsigned int *min_q_total;

      /**
       * For each BITSET_WORD, the node with the minimum q_total if
       * min_q_total[i] != ~0.
       */
      unsigned int *min_q_node;

      /**
       * Tracks the start of the set of optimistically-colored nodes in the
       * stack.
       */
      unsigned int stack_optimistic_start;
   } tmp;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      if (need_conflict_lists) {
         regs->regs[i].conflict_list = ralloc_array(regs->regs,
                                                    unsigned int, 4);
         regs->regs[i].conflict_list_size = 4;
         regs->regs[i].conflict_list[0] = i;
      } else {
         regs->regs[i].conflict_list = NULL;
         regs->regs[i].conflict_list_size = 0;
      }
      regs->regs[i].num_conflicts = 1;
   }

   return regs;
}
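
/*
 * A minimal end-to-end setup sketch (a hypothetical 32-register scalar
 * file; mem_ctx and the class layout are illustrative, not from any
 * particular driver):
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 32, true);
 *    unsigned int scalar_class = ra_alloc_reg_class(regs);
 *    for (unsigned int r = 0; r < 32; r++)
 *       ra_class_add_reg(regs, scalar_class, r);
 *    ra_set_finalize(regs, NULL);
 *
 * Any conflicts beyond a register conflicting with itself (e.g. for
 * overlapping or paired registers) must be registered before
 * ra_set_finalize(), since finalization derives the q values from the
 * conflict lists and then frees them.
 */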
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list) {
      if (reg1->conflict_list_size == reg1->num_conflicts) {
         reg1->conflict_list_size *= 2;
         reg1->conflict_list = reralloc(regs->regs, reg1->conflict_list,
                                        unsigned int, reg1->conflict_list_size);
      }
      reg1->conflict_list[reg1->num_conflicts++] = r2;
   }

   BITSET_SET(reg1->conflicts, r2);
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      ra_add_reg_conflict(regs, reg, regs->regs[base_reg].conflict_list[i]);
   }
}
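
/*
 * For example, a hypothetical pair register p0 that aliases base registers
 * r0 and r1 (register indices illustrative) could be wired up as:
 *
 *    ra_add_transitive_reg_conflict(regs, r0, p0);
 *    ra_add_transitive_reg_conflict(regs, r1, p0);
 *
 * after which p0 conflicts with r0, r1, and everything either of them
 * already conflicted with.
 */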
/**
 * Set up conflicts between base_reg and its two half registers reg0 and
 * reg1, but take care to not add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full size registers are aliased by
 * two half size registers (e.g. 32 bit float and 16 bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg,
                                    unsigned int reg0, unsigned int reg1)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      unsigned int conflict = regs->regs[base_reg].conflict_list[i];
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, conflict);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, conflict);
   }
}
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   int c;

   BITSET_FOREACH_SET(c, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned int i;

      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   assert(r < regs->count);

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            unsigned int rc;
            int max_conflicts = 0;

            for (rc = 0; rc < regs->count; rc++) {
               int conflicts = 0;
               unsigned int i;

               if (!reg_belongs_to_class(rc, regs->classes[c]))
                  continue;

               for (i = 0; i < regs->regs[rc].num_conflicts; i++) {
                  unsigned int rb = regs->regs[rc].conflict_list[i];
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      ralloc_free(regs->regs[b].conflict_list);
      regs->regs[b].conflict_list = NULL;
   }
}
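
/*
 * A sketch of passing precomputed q values rather than letting
 * ra_set_finalize() derive them (the numbers match the hypothetical
 * base/pair example in the file comment above and are illustrative only):
 *
 *    unsigned int q_base[] = { 1, 2 };            // q(B,B), q(B,P)
 *    unsigned int q_pair[] = { 1, 1 };            // q(P,B), q(P,P)
 *    unsigned int *q_values[] = { q_base, q_pair };
 *    ra_set_finalize(regs, q_values);
 *
 * Passing NULL instead computes the same table from the conflict lists,
 * at a cost that is roughly O(class_count^2 * count * conflicts).
 */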
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_append(&g->nodes[n1].adjacency_list, unsigned int, n2);
}
static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_delete_unordered(&g->nodes[n1].adjacency_list, unsigned int,
                                  n2);
}
static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = align64(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);

   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      util_dynarray_init(&g->nodes[i].adjacency_list, g);
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_select() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);
   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}
void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}
void ra_set_select_reg_callback(struct ra_graph *g,
                                ra_select_reg_callback callback,
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}
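
/*
 * A sketch of a driver-provided selection callback (a hypothetical policy
 * that picks the highest available register; the struct and names are
 * illustrative).  The signature matches the call site in ra_select() below:
 *
 *    struct select_ctx { unsigned int reg_count; };
 *
 *    static unsigned int
 *    pick_highest_reg(unsigned int n, BITSET_WORD *regs, void *data)
 *    {
 *       struct select_ctx *ctx = data;
 *       for (int r = ctx->reg_count - 1; r >= 0; r--) {
 *          if (BITSET_TEST(regs, r))
 *             return r;
 *       }
 *       unreachable("ra_select() never passes an empty register set");
 *    }
 *
 *    ra_set_select_reg_callback(g, pick_highest_reg, &ctx);
 *
 * ra_select() only invokes the callback after ra_compute_available_regs()
 * has confirmed that at least one bit in regs is set.
 */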
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
unsigned int
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->nodes[n].class;
}
unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;

   ra_resize_interference_graph(g, g->count + 1);
   ra_set_node_class(g, n, class);

   return n;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);

   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
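
/*
 * Putting the graph API together, a driver's allocation pass looks roughly
 * like this (node_count, scalar_class, and the interference pairs come from
 * the driver's own liveness analysis; all names are illustrative):
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *
 *    for (unsigned int n = 0; n < node_count; n++)
 *       ra_set_node_class(g, n, scalar_class);
 *
 *    // For each pair of variables whose live ranges overlap:
 *    //    ra_add_node_interference(g, n1, n2);
 *
 *    if (ra_allocate(g)) {
 *       for (unsigned int n = 0; n < node_count; n++) {
 *          unsigned int hw_reg = ra_get_node_reg(g, n);
 *          // Rewrite uses of variable n to hw_reg.
 *       }
 *    }
 *    ralloc_free(g);
 */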
void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      ra_node_remove_adjacency(g, *n2p, n);
   }

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   util_dynarray_clear(&g->nodes[n].adjacency_list);
}
static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;

   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}
static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have stuff we can immediately push onto the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating min_q_total because we
             * know we're going to loop again before attempting to do anything
             * else.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  They need to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}
static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}
/* Computes a bitfield of what regs are available for a given register
 * selection.
 *
 * This lets drivers implement a more complicated policy than our simple first
 * or round robin policies (which don't require knowing the whole bitset).
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already been assigned a register.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->tmp.in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   /* Return whether any register is still available. */
   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(n, select_regs, g->select_reg_callback_data);
         assert(r < g->regs->count);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}
bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;

   return g->nodes[n].reg;
}
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignments (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}
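
/*
 * For example, a shader input that the hardware requires to land in
 * register 0 (the register number is illustrative) can be pinned while
 * the graph is being built:
 *
 *    // node "input" comes from ra_add_node() / graph setup as usual
 *    ra_set_node_reg(g, input, 0);
 *
 * The node still participates in interference edges, so its neighbors
 * will avoid register 0 and anything conflicting with it.
 */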
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
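
/*
 * A sketch of the spill loop a driver builds around this function
 * (spill_node() and the cost model are the driver's own, shown only for
 * shape):
 *
 *    for (unsigned int n = 0; n < node_count; n++)
 *       ra_set_node_spill_cost(g, n, estimated_spill_cost(n));
 *
 *    while (!ra_allocate(g)) {
 *       int n = ra_get_best_spill_node(g);
 *       if (n == -1)
 *          return false;   // Nothing spillable: allocation fails.
 *       spill_node(n);     // Driver-specific: insert loads/stores,
 *                          // then rebuild the interference graph.
 *    }
 */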
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}