/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * every thing that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "For this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
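/*
 * A small worked example of the pq test with hypothetical classes (not taken
 * from any particular backend): class B holds 8 single registers and class C
 * holds 4 aligned pairs, each pair aliasing two of B's singles.  Then
 * p(B) = 8 and q(B,C) = 2, since the worst-case register in C overlaps two
 * registers of B.  A class-B node with three class-C neighbors is trivially
 * colorable because 2 + 2 + 2 = 6 < 8 = p(B): whatever pairs the neighbors
 * take, at least two single registers remain free.
 */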
#include <stdbool.h>
#include <limits.h>

#include "main/imports.h"
#include "main/macros.h"
#include "util/ralloc.h"
#include "util/bitset.h"
#include "register_allocate.h"

#define NO_REG ~0U
struct ra_reg {
   BITSET_WORD *conflicts;
   unsigned int *conflict_list;
   unsigned int conflict_list_size;
   unsigned int num_conflicts;
};
struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};
struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};
struct ra_node {
   /**
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;
   unsigned int *adjacency_list;
   unsigned int adjacency_list_size;
   unsigned int adjacency_count;

   unsigned int class;

   /* Client-assigned register, if assigned, or NO_REG. */
   unsigned int forced_reg;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      /**
       * Temporary version of q_total which we decrement as things are placed
       * into the stack.
       */
      unsigned int q_total;
   } tmp;
};
struct ra_graph {
   struct ra_regs *regs;
   /**
    * the variables that need register allocation.
    */
   struct ra_node *nodes;
   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   unsigned int (*select_reg_callback)(struct ra_graph *g, BITSET_WORD *regs,
                                       void *data);
   void *select_reg_callback_data;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      unsigned int *stack;
      unsigned int stack_count;

      /** Bit-set indicating, for each register, if it's in the stack */
      BITSET_WORD *in_stack;

      /** Bit-set indicating, for each register, if it is pre-assigned */
      BITSET_WORD *reg_assigned;

      /** Bit-set indicating, for each register, the value of the pq test */
      BITSET_WORD *pq_test;

      /** For each BITSET_WORD, the minimum q value or ~0 if unknown */
      unsigned int *min_q_total;

      /**
       * For each BITSET_WORD, the node with the minimum q_total if
       * min_q_total[i] != ~0.
       */
      unsigned int *min_q_node;

      /**
       * Tracks the start of the set of optimistically-colored registers in the
       * stack.
       */
      unsigned int stack_optimistic_start;
   } tmp;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      if (need_conflict_lists) {
         regs->regs[i].conflict_list = ralloc_array(regs->regs,
                                                    unsigned int, 4);
         regs->regs[i].conflict_list_size = 4;
         regs->regs[i].conflict_list[0] = i;
      } else {
         regs->regs[i].conflict_list = NULL;
         regs->regs[i].conflict_list_size = 0;
      }
      regs->regs[i].num_conflicts = 1;
   }

   return regs;
}
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list) {
      if (reg1->conflict_list_size == reg1->num_conflicts) {
         reg1->conflict_list_size *= 2;
         reg1->conflict_list = reralloc(regs->regs, reg1->conflict_list,
                                        unsigned int, reg1->conflict_list_size);
      }
      reg1->conflict_list[reg1->num_conflicts++] = r2;
   }
   BITSET_SET(reg1->conflicts, r2);
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      ra_add_reg_conflict(regs, reg, regs->regs[base_reg].conflict_list[i]);
   }
}
/**
 * Set up conflicts between base_reg and its two half registers reg0 and
 * reg1, but take care to not add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full size registers are aliased by
 * two half size registers (e.g. 32-bit float and 16-bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg,
                                    unsigned int reg0, unsigned int reg1)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      unsigned int conflict = regs->regs[base_reg].conflict_list[i];
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, regs->regs[base_reg].conflict_list[i]);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, regs->regs[base_reg].conflict_list[i]);
   }
}
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   BITSET_WORD tmp;
   int c;

   BITSET_FOREACH_SET(c, tmp, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned i;

      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            unsigned int rc;
            int max_conflicts = 0;

            for (rc = 0; rc < regs->count; rc++) {
               int conflicts = 0;
               unsigned int i;

               if (!reg_belongs_to_class(rc, regs->classes[c]))
                  continue;

               for (i = 0; i < regs->regs[rc].num_conflicts; i++) {
                  unsigned int rb = regs->regs[rc].conflict_list[i];
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      ralloc_free(regs->regs[b].conflict_list);
      regs->regs[b].conflict_list = NULL;
   }
}
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   if (g->nodes[n1].adjacency_count >=
       g->nodes[n1].adjacency_list_size) {
      g->nodes[n1].adjacency_list_size *= 2;
      g->nodes[n1].adjacency_list = reralloc(g, g->nodes[n1].adjacency_list,
                                             unsigned int,
                                             g->nodes[n1].adjacency_list_size);
   }

   g->nodes[n1].adjacency_list[g->nodes[n1].adjacency_count] = n2;
   g->nodes[n1].adjacency_count++;
}
static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   unsigned int i;
   for (i = 0; i < g->nodes[n1].adjacency_count; i++) {
      if (g->nodes[n1].adjacency_list[i] == n2) {
         memmove(&g->nodes[n1].adjacency_list[i],
                 &g->nodes[n1].adjacency_list[i + 1],
                 (g->nodes[n1].adjacency_count - i - 1) *
                 sizeof(g->nodes[n1].adjacency_list[0]));
         break;
      }
   }
   assert(i < g->nodes[n1].adjacency_count);
   g->nodes[n1].adjacency_count--;
}
static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = ALIGN(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      g->nodes[i].adjacency_list_size = 4;
      g->nodes[i].adjacency_list =
         ralloc_array(g, unsigned int, g->nodes[i].adjacency_list_size);
      g->nodes[i].adjacency_count = 0;
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_select() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);

   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}
void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}
void ra_set_select_reg_callback(struct ra_graph *g,
                                unsigned int (*callback)(struct ra_graph *g,
                                                         BITSET_WORD *regs,
                                                         void *data),
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
unsigned int
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->nodes[n].class;
}
unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   for (unsigned int i = 0; i < g->nodes[n].adjacency_count; i++)
      ra_node_remove_adjacency(g, g->nodes[n].adjacency_list[i], n);

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   g->nodes[n].adjacency_count = 0;
}
static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}
static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   unsigned int i;
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   for (i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have stuff we can immediately take off the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating lowest_q_total because we
             * know we're going to loop again before attempting to do anything
             * else.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  It needs to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}
static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   unsigned int i;

   for (i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}
/* Computes a bitfield of what regs are available for a given register
 * allocation.
 *
 * This lets drivers implement a more complicated policy than our simple first
 * or round robin policies (which don't require knowing the whole bitset)
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already been assigned.
    */
   for (int i = 0; i < g->nodes[n].adjacency_count; i++) {
      unsigned int n2 = g->nodes[n].adjacency_list[i];
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->tmp.in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false;
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(g, select_regs, g->select_reg_callback_data);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}
bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;
   else
      return g->nodes[n].reg;
}
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignment (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   unsigned int j;
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}