/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
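
/* Worked example of the pq test (illustrative numbers, not from the paper):
 * say class B holds 8 base registers, class C holds 4 aligned pairs, and
 * each pair conflicts with the two base registers it aliases.  Then
 * p(B) = 8 and q(B,C) = 2, since the worst-case choice from C conflicts
 * with two registers of B.  A class-B node with three class-C neighbors is
 * trivially colorable, because 3 * q(B,C) = 6 < p(B) = 8.
 */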
#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "main/macros.h"
#include "util/bitset.h"
#include "util/blob.h"
#include "util/ralloc.h"
#include "util/u_dynarray.h"
#include "util/u_math.h"

#include "register_allocate.h"

#define NO_REG ~0U
struct ra_reg {
   BITSET_WORD *conflicts;
   struct util_dynarray conflict_list;
};

struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};
struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in the Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in the
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};

struct ra_node {
   /**
    * Set of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;

   struct util_dynarray adjacency_list;

   unsigned int class;

   /* Client-assigned register, if assigned, or NO_REG. */
   unsigned int forced_reg;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      /**
       * Temporary version of q_total which we decrement as things are placed
       * into the stack.
       */
      unsigned int q_total;
   } tmp;
};
struct ra_graph {
   struct ra_regs *regs;
   /**
    * the variables that need register allocation.
    */
   struct ra_node *nodes;

   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   ra_select_reg_callback select_reg_callback;
   void *select_reg_callback_data;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      unsigned int *stack;
      unsigned int stack_count;

      /** Bit-set indicating, for each node, if it's in the stack. */
      BITSET_WORD *in_stack;

      /** Bit-set indicating, for each node, if it's pre-assigned. */
      BITSET_WORD *reg_assigned;

      /** Bit-set indicating, for each node, the value of the pq test. */
      BITSET_WORD *pq_test;

      /** For each BITSET_WORD, the minimum q value or ~0 if unknown. */
      unsigned int *min_q_total;

      /**
       * For each BITSET_WORD, the node with the minimum q_total if
       * min_q_total[i] != ~0.
       */
      unsigned int *min_q_node;

      /**
       * Tracks the start of the set of optimistically-colored registers in
       * the stack.
       */
      unsigned int stack_optimistic_start;
   } tmp;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      util_dynarray_init(&regs->regs[i].conflict_list,
                         need_conflict_lists ? regs->regs : NULL);
      if (need_conflict_lists)
         util_dynarray_append(&regs->regs[i].conflict_list, unsigned int, i);
   }

   return regs;
}
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list.mem_ctx) {
      util_dynarray_append(&reg1->conflict_list, unsigned int, r2);
   }
   BITSET_SET(reg1->conflicts, r2);
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   ra_add_reg_conflict(regs, reg, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int,
                         r2p) {
      ra_add_reg_conflict(regs, reg, *r2p);
   }
}
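
/* Example (hypothetical register numbering): if pair register 8 aliases base
 * register 0, and register 0 already conflicts with a wider register 16 that
 * contains it, then ra_add_transitive_reg_conflict(regs, 0, 8) records both
 * the 8<->0 and the 8<->16 conflicts in a single call.
 */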
/**
 * Set up conflicts between base_reg and its two half registers reg0 and
 * reg1, but take care to not add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full size registers are aliased by
 * two half size registers (eg 32 bit float and 16 bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg,
                                    unsigned int reg0, unsigned int reg1)
{
   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int, i) {
      unsigned int conflict = *i;
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, conflict);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, conflict);
   }
}
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   unsigned int c;

   BITSET_FOREACH_SET(c, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned int i;
      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   assert(r < regs->count);

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            unsigned int rc;
            int max_conflicts = 0;

            BITSET_FOREACH_SET(rc, regs->classes[c]->regs, regs->count) {
               int conflicts = 0;

               util_dynarray_foreach(&regs->regs[rc].conflict_list,
                                     unsigned int, rbp) {
                  unsigned int rb = *rbp;
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      util_dynarray_fini(&regs->regs[b].conflict_list);
   }
}
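
/* Typical reg-set construction (sketch; the register/class numbering here is
 * hypothetical, for a file of 8 base registers aliased by 4 aligned pairs):
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(ctx, 12, true);
 *    unsigned int base = ra_alloc_reg_class(regs);
 *    unsigned int pairs = ra_alloc_reg_class(regs);
 *    for (unsigned int r = 0; r < 8; r++)
 *       ra_class_add_reg(regs, base, r);
 *    for (unsigned int p = 0; p < 4; p++) {
 *       ra_class_add_reg(regs, pairs, 8 + p);
 *       ra_add_transitive_reg_conflict(regs, 2 * p, 8 + p);
 *       ra_add_transitive_reg_conflict(regs, 2 * p + 1, 8 + p);
 *    }
 *    ra_set_finalize(regs, NULL);
 *
 * Passing NULL for q_values makes ra_set_finalize() compute q(B,C) itself.
 */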
void
ra_set_serialize(const struct ra_regs *regs, struct blob *blob)
{
   blob_write_uint32(blob, regs->count);
   blob_write_uint32(blob, regs->class_count);

   for (unsigned int r = 0; r < regs->count; r++) {
      struct ra_reg *reg = &regs->regs[r];
      blob_write_bytes(blob, reg->conflicts, BITSET_WORDS(regs->count) *
                                             sizeof(BITSET_WORD));
      assert(util_dynarray_num_elements(&reg->conflict_list, unsigned int) == 0);
   }

   for (unsigned int c = 0; c < regs->class_count; c++) {
      struct ra_class *class = regs->classes[c];
      blob_write_bytes(blob, class->regs, BITSET_WORDS(regs->count) *
                                          sizeof(BITSET_WORD));
      blob_write_uint32(blob, class->p);
      blob_write_bytes(blob, class->q, regs->class_count * sizeof(*class->q));
   }

   blob_write_uint32(blob, regs->round_robin);
}
struct ra_regs *
ra_set_deserialize(void *mem_ctx, struct blob_reader *blob)
{
   unsigned int reg_count = blob_read_uint32(blob);
   unsigned int class_count = blob_read_uint32(blob);

   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, reg_count, false);
   assert(regs->count == reg_count);

   for (unsigned int r = 0; r < reg_count; r++) {
      struct ra_reg *reg = &regs->regs[r];
      blob_copy_bytes(blob, reg->conflicts, BITSET_WORDS(reg_count) *
                                            sizeof(BITSET_WORD));
   }

   assert(regs->classes == NULL);
   regs->classes = ralloc_array(regs->regs, struct ra_class *, class_count);
   regs->class_count = class_count;

   for (unsigned int c = 0; c < class_count; c++) {
      struct ra_class *class = rzalloc(regs, struct ra_class);
      regs->classes[c] = class;

      class->regs = ralloc_array(class, BITSET_WORD, BITSET_WORDS(reg_count));
      blob_copy_bytes(blob, class->regs, BITSET_WORDS(reg_count) *
                                         sizeof(BITSET_WORD));

      class->p = blob_read_uint32(blob);

      class->q = ralloc_array(regs->classes[c], unsigned int, class_count);
      blob_copy_bytes(blob, class->q, class_count * sizeof(*class->q));
   }

   regs->round_robin = blob_read_uint32(blob);

   return regs;
}
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_append(&g->nodes[n1].adjacency_list, unsigned int, n2);
}
static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_delete_unordered(&g->nodes[n1].adjacency_list, unsigned int,
                                  n2);
}
static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = align64(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      util_dynarray_init(&g->nodes[i].adjacency_list, g);
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_select() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);

   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}
void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc)
      ra_realloc_interference_graph(g, g->alloc * 2);
}
void ra_set_select_reg_callback(struct ra_graph *g,
                                ra_select_reg_callback callback,
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}
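
/* Example callback (sketch; assumes the ra_select_reg_callback typedef in
 * register_allocate.h matches the invocation in ra_select() below): pick the
 * lowest available register, mimicking the default policy.  The register
 * count is smuggled in through the data pointer here.
 *
 *    static unsigned int
 *    pick_lowest_reg(unsigned int n, BITSET_WORD *regs, void *data)
 *    {
 *       unsigned int reg_count = *(unsigned int *)data;
 *       for (unsigned int r = 0; r < reg_count; r++) {
 *          if (BITSET_TEST(regs, r))
 *             return r;
 *       }
 *       assert(!"ra_select() only calls this with a non-empty set");
 *       return 0;
 *    }
 */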
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
unsigned int
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->nodes[n].class;
}
unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
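
/* Per-compile usage sketch (node indices and the "base" class handle are
 * hypothetical):
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *    for (unsigned int n = 0; n < node_count; n++)
 *       ra_set_node_class(g, n, base);
 *    ra_add_node_interference(g, 0, 1);
 *    if (ra_allocate(g)) {
 *       unsigned int hw_reg = ra_get_node_reg(g, 0);
 *       ...
 *    }
 *    ralloc_free(g);
 */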
void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      ra_node_remove_adjacency(g, *n2p, n);
   }

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   util_dynarray_clear(&g->nodes[n].adjacency_list);
}
static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}
static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >=
                g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack.  We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (31 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have stuff we can immediately take off the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating lowest_q_total because we
             * know we're going to loop again before attempting to do anything
             * else.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  It needs to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}
static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}
/* Computes a bitfield of what regs are available for a given register
 * selection.
 *
 * This lets drivers implement a more complicated policy than our simple first
 * or round robin policies (which don't require knowing the whole bitset).
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already been assigned a register.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->tmp.in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(n, select_regs, g->select_reg_callback_data);
         assert(r < g->regs->count);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}
bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;
   else
      return g->nodes[n].reg;
}
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignments (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling.  When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
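
/* Spill loop sketch on the caller's side; spill_register() is a placeholder
 * for whatever rewriting the driver performs before rebuilding the graph and
 * its spill costs:
 *
 *    while (!ra_allocate(g)) {
 *       int spill_node = ra_get_best_spill_node(g);
 *       if (spill_node == -1)
 *          return false;
 *       spill_register(shader, spill_node);
 *       (rebuild the interference graph and spill costs, then retry)
 *    }
 */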
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}