/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
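/*
 * As a worked example of the pq test (register file chosen purely for
 * illustration): suppose four base registers b0-b3 and two aligned pair
 * registers p0 (aliasing b0/b1) and p1 (aliasing b2/b3).  With B the base
 * class and P the pair class:
 *
 *    p(B) = 4, p(P) = 2
 *    q(B,B) = 1   (a base register conflicts only with itself)
 *    q(B,P) = 2   (a pair overlaps two base registers)
 *    q(P,B) = 1   (a base register overlaps exactly one pair)
 *    q(P,P) = 1
 *
 * A node of class B with one neighbor of class P and one of class B is
 * trivially colorable, because q(B,P) + q(B,B) = 3 < 4 = p(B).
 */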
#include <stdbool.h>
#include <limits.h>

#include "util/ralloc.h"
#include "main/macros.h"
#include "util/bitset.h"
#include "util/u_dynarray.h"

#include "register_allocate.h"

#define NO_REG ~0U

struct ra_reg {
   BITSET_WORD *conflicts;
   struct util_dynarray conflict_list;
};

struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};
struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};
struct ra_node {
   /**
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;

   struct util_dynarray adjacency_list;

   unsigned int class;

   /* Client-assigned register, if assigned, or NO_REG. */
   unsigned int forced_reg;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      /**
       * Temporary version of q_total which we decrement as things are placed
       * into the stack.
       */
      unsigned int q_total;
   } tmp;
};
struct ra_graph {
   struct ra_regs *regs;
   /**
    * the variables that need register allocation.
    */
   struct ra_node *nodes;

   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   ra_select_reg_callback select_reg_callback;
   void *select_reg_callback_data;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      unsigned int *stack;
      unsigned int stack_count;

      /** Bit-set indicating, for each node, if it's in the stack */
      BITSET_WORD *in_stack;

      /** Bit-set indicating, for each node, if it's pre-assigned */
      BITSET_WORD *reg_assigned;

      /** Bit-set indicating, for each node, the value of the pq test */
      BITSET_WORD *pq_test;

      /** For each BITSET_WORD, the minimum q value or ~0 if unknown */
      unsigned int *min_q_total;

      /**
       * For each BITSET_WORD, the node with the minimum q_total if
       * min_q_total[i] != ~0.
       */
      unsigned int *min_q_node;

      /**
       * Tracks the start of the set of optimistically-colored registers in the
       * stack.
       */
      unsigned int stack_optimistic_start;
   } tmp;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      util_dynarray_init(&regs->regs[i].conflict_list,
                         need_conflict_lists ? regs->regs : NULL);
      if (need_conflict_lists)
         util_dynarray_append(&regs->regs[i].conflict_list, unsigned int, i);
   }

   return regs;
}
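/*
 * Example usage (a minimal sketch; the register count and the single class
 * shown are hypothetical):
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 4, true);
 *    unsigned int base_class = ra_alloc_reg_class(regs);
 *    for (unsigned int r = 0; r < 4; r++)
 *       ra_class_add_reg(regs, base_class, r);
 *    ra_set_finalize(regs, NULL);
 */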
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list.mem_ctx) {
      util_dynarray_append(&reg1->conflict_list, unsigned int, r2);
   }
   BITSET_SET(reg1->conflicts, r2);
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   ra_add_reg_conflict(regs, reg, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int,
                         r2p) {
      ra_add_reg_conflict(regs, reg, *r2p);
   }
}
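/*
 * For example (sketch; the register numbering is hypothetical), if registers
 * 0 and 1 are base registers and register 4 is the aligned pair covering
 * both of them:
 *
 *    ra_add_transitive_reg_conflict(regs, 0, 4);
 *    ra_add_transitive_reg_conflict(regs, 1, 4);
 *
 * leaves register 4 conflicting with 0, 1, and everything either of those
 * already conflicted with.
 */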
/**
 * Set up conflicts between base_reg and its two half registers reg0 and
 * reg1, but take care to not add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full size registers are aliased by
 * two half size registers (eg 32 bit float and 16 bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg,
                                    unsigned int reg0, unsigned int reg1)
{
   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int, i) {
      unsigned int conflict = *i;
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, conflict);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, conflict);
   }
}
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   int c;

   BITSET_FOREACH_SET(c, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned int i;
      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   assert(r < regs->count);

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            int max_conflicts = 0;
            unsigned int rc;

            for (rc = 0; rc < regs->count; rc++) {
               int conflicts = 0;

               if (!reg_belongs_to_class(rc, regs->classes[c]))
                  continue;

               util_dynarray_foreach(&regs->regs[rc].conflict_list,
                                     unsigned int, rbp) {
                  unsigned int rb = *rbp;
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      util_dynarray_fini(&regs->regs[b].conflict_list);
   }
}
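/*
 * Example of passing precomputed q values for two classes (sketch; the
 * values shown are hypothetical and must match how the classes and
 * conflicts were actually built):
 *
 *    unsigned int q_base[2] = { 1, 2 };    // q(base,base), q(base,pair)
 *    unsigned int q_pair[2] = { 1, 1 };    // q(pair,base), q(pair,pair)
 *    unsigned int *q_values[2] = { q_base, q_pair };
 *    ra_set_finalize(regs, q_values);
 */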
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_append(&g->nodes[n1].adjacency_list, unsigned int, n2);
}
static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_delete_unordered(&g->nodes[n1].adjacency_list, unsigned int,
                                  n2);
}
static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = align64(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      util_dynarray_init(&g->nodes[i].adjacency_list, g);
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_select() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);

   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}
void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}
void ra_set_select_reg_callback(struct ra_graph *g,
                                ra_select_reg_callback callback,
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}
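/*
 * A driver-supplied callback might look like this (sketch; the "lowest set
 * bit" policy and the reg_count-via-data plumbing are hypothetical).  It
 * must return one of the registers whose bit is set in "regs":
 *
 *    static unsigned int
 *    my_select_reg(unsigned int n, BITSET_WORD *regs, void *data)
 *    {
 *       unsigned int reg_count = *(unsigned int *)data;
 *       int r;
 *       BITSET_FOREACH_SET(r, regs, reg_count)
 *          return r;
 *       unreachable("ra_select() only calls us with a non-empty set");
 *    }
 */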
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
unsigned int
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->nodes[n].class;
}
unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
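/*
 * Typical graph construction and allocation (sketch; num_vars,
 * class_for_var, and the single interference shown are hypothetical):
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, num_vars);
 *    for (unsigned int i = 0; i < num_vars; i++)
 *       ra_set_node_class(g, i, class_for_var[i]);
 *    // one call per pair of variables that are live at the same time
 *    ra_add_node_interference(g, 0, 1);
 *    if (ra_allocate(g)) {
 *       unsigned int hw_reg = ra_get_node_reg(g, 0);
 *    }
 */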
void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      ra_node_remove_adjacency(g, *n2p, n);
   }

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   util_dynarray_clear(&g->nodes[n].adjacency_list);
}
static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}
static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have nodes we can immediately push onto the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating min_q_total because we
             * know we're going to loop again before attempting to do anything
             * else.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  They need to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}
static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}
/* Computes a bitfield of what regs are available for a given register
 * allocation.
 *
 * This lets drivers implement a more complicated policy than our simple first
 * or round robin policies (which don't require knowing the whole bitset)
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already been assigned.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->tmp.in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   /* Fail if no registers remain available. */
   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(n, select_regs, g->select_reg_callback_data);
         assert(r < g->regs->count);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}
bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;
   else
      return g->nodes[n].reg;
}
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignment (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}
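/*
 * For example (sketch; the node and register numbers are hypothetical), a
 * shader input that the hardware always delivers in register 2 can be
 * pinned before interference is added:
 *
 *    unsigned int payload = ra_add_node(g, input_class);
 *    ra_set_node_reg(g, payload, 2);
 */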
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}
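/*
 * The spilling API is typically driven by a loop like this (sketch;
 * spill_variable() and the rebuild step stand in for driver-specific code):
 *
 *    while (!ra_allocate(g)) {
 *       int n = ra_get_best_spill_node(g);
 *       if (n == -1)
 *          abort();   // nothing spillable: allocation cannot succeed
 *       spill_variable(n);
 *       // rebuild the graph and spill costs, then try again
 *    }
 */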