/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers means that that node can get assigned a register
 * regardless of what its neighbors choose, so that node is pushed on a
 * stack and removed (with its edges) from the graph.  That likely
 * causes other nodes to become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
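
/*
 * A worked example of the pq test (an illustrative register layout, not
 * anything defined by this file): suppose four base registers r0-r3 form
 * class B, and two aligned pairs p0 = {r0,r1} and p1 = {r2,r3} form class
 * P.  Then p(B) = 4 and p(P) = 2.  Any pair conflicts with two base
 * registers, so q(B,P) = 2; a base register conflicts with exactly one
 * pair, so q(P,B) = 1; and q(B,B) = q(P,P) = 1.  A class-B node with two
 * class-P neighbors has only two edges, which naive edge counting would
 * call trivially colorable (2 < 4), but the pq test correctly rejects it:
 * q(B,P) + q(B,P) = 4 is not less than p(B) = 4, and indeed if p0 and p1
 * are both taken, no base register remains.
 */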
#include <stdbool.h>

#include "main/imports.h"
#include "main/macros.h"
#include "util/bitset.h"
#include "util/ralloc.h"
#include "register_allocate.h"
#define NO_REG ~0U

struct ra_reg {
   BITSET_WORD *conflicts;
   unsigned int *conflict_list;
   unsigned int conflict_list_size;
   unsigned int num_conflicts;
};

struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};

struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};

struct ra_node {
   /**
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;
   unsigned int *adjacency_list;
   unsigned int adjacency_list_size;
   unsigned int adjacency_count;

   unsigned int class;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;
};

struct ra_graph {
   struct ra_regs *regs;
   /**
    * The variables that need register allocation.
    */
   struct ra_node *nodes;
   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   unsigned int *stack;
   unsigned int stack_count;

   /** Bit-set indicating, for each node, if it's in the stack. */
   BITSET_WORD *in_stack;

   /** Bit-set indicating, for each node, if it is pre-assigned. */
   BITSET_WORD *reg_assigned;

   /** Bit-set indicating, for each node, the value of the pq test. */
   BITSET_WORD *pq_test;

   /** For each BITSET_WORD, the minimum q value or ~0 if unknown. */
   unsigned int *min_q_total;

   /**
    * For each BITSET_WORD, the node with the minimum q_total if
    * min_q_total[i] != ~0.
    */
   unsigned int *min_q_node;

   /**
    * Tracks the start of the set of optimistically-colored registers in the
    * stack.
    */
   unsigned int stack_optimistic_start;

   unsigned int (*select_reg_callback)(struct ra_graph *g, BITSET_WORD *regs,
                                       void *data);
   void *select_reg_callback_data;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      if (need_conflict_lists) {
         regs->regs[i].conflict_list = ralloc_array(regs->regs,
                                                    unsigned int, 4);
         regs->regs[i].conflict_list_size = 4;
         regs->regs[i].conflict_list[0] = i;
      } else {
         regs->regs[i].conflict_list = NULL;
         regs->regs[i].conflict_list_size = 0;
      }
      regs->regs[i].num_conflicts = 1;
   }

   return regs;
}
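
/*
 * A minimal sketch of building a register set with this API, using the
 * hypothetical base/pair layout from the worked example above (register
 * numbers 0-3 are the bases, 4-5 the pairs):
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(NULL, 6, true);
 *    unsigned int base = ra_alloc_reg_class(regs);
 *    unsigned int pairs = ra_alloc_reg_class(regs);
 *    for (unsigned int i = 0; i < 4; i++)
 *       ra_class_add_reg(regs, base, i);
 *    ra_class_add_reg(regs, pairs, 4);
 *    ra_class_add_reg(regs, pairs, 5);
 *    ra_add_transitive_reg_conflict(regs, 0, 4);
 *    ra_add_transitive_reg_conflict(regs, 1, 4);
 *    ra_add_transitive_reg_conflict(regs, 2, 5);
 *    ra_add_transitive_reg_conflict(regs, 3, 5);
 *    ra_set_finalize(regs, NULL);
 *
 * (Passing NULL lets ra_set_finalize() compute the q values itself.  A
 * driver that wants spread-out allocation can also call
 * ra_set_allocate_round_robin(regs), described just below.)
 */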
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list) {
      if (reg1->conflict_list_size == reg1->num_conflicts) {
         reg1->conflict_list_size *= 2;
         reg1->conflict_list = reralloc(regs->regs, reg1->conflict_list,
                                        unsigned int, reg1->conflict_list_size);
      }
      reg1->conflict_list[reg1->num_conflicts++] = r2;
   }
   BITSET_SET(reg1->conflicts, r2);
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      ra_add_reg_conflict(regs, reg, regs->regs[base_reg].conflict_list[i]);
   }
}
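
/*
 * For instance, continuing the hypothetical layout above: once r0 already
 * conflicts with p0, the single call
 *
 *    ra_add_transitive_reg_conflict(regs, 0, 6);
 *
 * for some new aggregate register r6 behaves like
 *
 *    ra_add_reg_conflict(regs, 6, 0);
 *    ra_add_reg_conflict(regs, 6, 4);
 *
 * (r0's conflict list holds r0 itself and p0), saving one explicit call
 * per existing conflict of the base register.
 */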
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   BITSET_WORD tmp;
   int c;

   BITSET_FOREACH_SET(c, tmp, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned int i;

      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            unsigned int rc;
            int max_conflicts = 0;

            for (rc = 0; rc < regs->count; rc++) {
               int conflicts = 0;
               unsigned int i;

               if (!reg_belongs_to_class(rc, regs->classes[c]))
                  continue;

               for (i = 0; i < regs->regs[rc].num_conflicts; i++) {
                  unsigned int rb = regs->regs[rc].conflict_list[i];
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      ralloc_free(regs->regs[b].conflict_list);
      regs->regs[b].conflict_list = NULL;
   }
}
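
/*
 * A sketch of supplying precomputed q values instead (class indices in the
 * order ra_alloc_reg_class() returned them; the numbers are the ones from
 * the worked example near the top of this file, B first, then P):
 *
 *    unsigned int q_base[]  = { 1, 2 };            q(B,B), q(B,P)
 *    unsigned int q_pairs[] = { 1, 1 };            q(P,B), q(P,P)
 *    unsigned int *q_values[] = { q_base, q_pairs };
 *    ra_set_finalize(regs, q_values);
 */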
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   if (g->nodes[n1].adjacency_count >=
       g->nodes[n1].adjacency_list_size) {
      g->nodes[n1].adjacency_list_size *= 2;
      g->nodes[n1].adjacency_list = reralloc(g, g->nodes[n1].adjacency_list,
                                             unsigned int,
                                             g->nodes[n1].adjacency_list_size);
   }

   g->nodes[n1].adjacency_list[g->nodes[n1].adjacency_count] = n2;
   g->nodes[n1].adjacency_count++;
}
static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = ALIGN(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      g->nodes[i].adjacency_list_size = 4;
      g->nodes[i].adjacency_list =
         ralloc_array(g, unsigned int, g->nodes[i].adjacency_list_size);
      g->nodes[i].adjacency_count = 0;
      g->nodes[i].q_total = 0;

      g->nodes[i].reg = NO_REG;
   }

   g->stack = reralloc(g, g->stack, unsigned int, alloc);
   g->in_stack = rerzalloc(g, g->in_stack, BITSET_WORD,
                           g_bitset_count, bitset_count);

   g->reg_assigned = rerzalloc(g, g->reg_assigned, BITSET_WORD,
                               g_bitset_count, bitset_count);
   g->pq_test = rerzalloc(g, g->pq_test, BITSET_WORD,
                          g_bitset_count, bitset_count);
   g->min_q_total = rerzalloc(g, g->min_q_total, unsigned int,
                              g_bitset_count, bitset_count);
   g->min_q_node = rerzalloc(g, g->min_q_node, unsigned int,
                             g_bitset_count, bitset_count);

   g->alloc = alloc;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}
void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}
void ra_set_select_reg_callback(struct ra_graph *g,
                                unsigned int (*callback)(struct ra_graph *g,
                                                         BITSET_WORD *regs,
                                                         void *data),
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->pq_test, n);
   } else if (g->min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].q_total < g->min_q_total[i] ||
          (g->nodes[n].q_total == g->min_q_total[i] &&
           n > g->min_q_node[i])) {
         g->min_q_total[i] = g->nodes[n].q_total;
         g->min_q_node[i] = n;
      }
   }
}
static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   unsigned int i;
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->in_stack, n));

   for (i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->in_stack, n2) && !BITSET_TEST(g->reg_assigned, n2)) {
         assert(g->nodes[n2].q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->stack[g->stack_count] = n;
   g->stack_count++;
   BITSET_SET(g->in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->min_q_total[i] = UINT_MAX;
      g->min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         if (g->nodes[n].reg != NO_REG)
            g->reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->in_stack[i] | g->reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have stuff we can immediately take off the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating lowest_q_total because we
             * know we're going to loop again before attempting to do anything
             * else.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  It needs to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].q_total < g->min_q_total[i]) {
                     g->min_q_total[i] = g->nodes[n].q_total;
                     g->min_q_node[i] = n;
                  }
               }
            }
            if (g->min_q_total[i] < min_q_total) {
               min_q_node = g->min_q_node[i];
               min_q_total = g->min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->stack_optimistic_start = stack_optimistic_start;
}
static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   unsigned int i;

   for (i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];

      if (!BITSET_TEST(g->in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}
/* Computes a bitfield of what regs are available for a given register
 * selection.
 *
 * This lets drivers implement a more complicated policy than our simple first
 * or round robin policies (which don't require knowing the whole bitset)
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already been assigned.
    */
   for (int i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->stack[g->stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(g, select_regs, g->select_reg_callback_data);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->stack_count - 1 <= g->stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}
bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
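
/*
 * A sketch of the allocate/spill loop a driver typically wraps around
 * ra_allocate() (cost_estimate() and driver_spill() are hypothetical
 * driver-side helpers, not part of this file):
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *    for (unsigned int n = 0; n < node_count; n++) {
 *       ra_set_node_class(g, n, base);
 *       ra_set_node_spill_cost(g, n, cost_estimate(n));
 *    }
 *    ... ra_add_node_interference(g, n1, n2) for each live-range overlap ...
 *    while (!ra_allocate(g)) {
 *       int spill = ra_get_best_spill_node(g);
 *       if (spill == -1)
 *          break;                  failed: no spillable node left
 *       driver_spill(spill);       rewrite the code, rebuild g, and retry
 *    }
 *    unsigned int r = ra_get_node_reg(g, some_node);
 */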
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   return g->nodes[n].reg;
}
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignment (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].reg = reg;
   BITSET_CLEAR(g->in_stack, n);
}
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   unsigned int j;
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}
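
/*
 * Worked numbers, again with the hypothetical base/pair classes from the
 * top of this file: spilling a class-P node with two class-B neighbors
 * yields benefit 2 * q(P,B) / p(P) = 2 * 1 / 2 = 1.0, while a class-B node
 * with the same neighbors yields 2 * q(B,B) / p(B) = 2 * 1 / 4 = 0.5;
 * eliminating the pair node's interferences frees proportionally more of
 * its own class.
 */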
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}