/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the registers to be popped later.
 *
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * for each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
 */
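/*
 * Worked example of the pq test (an illustration added here, not text from
 * the Runeson/Nyström paper): assume a hypothetical register file with 8
 * base registers r0-r7 in class "base" and 4 aligned pairs p0-p3 in class
 * "pair", where pair pi conflicts with r(2i) and r(2i+1).  Then:
 *
 *    p(base) = 8, p(pair) = 4
 *    q(base, base) = 1   (a base register conflicts only with itself)
 *    q(base, pair) = 2   (any pair conflicts with two base registers)
 *    q(pair, base) = 1   (a base register conflicts with exactly one pair)
 *    q(pair, pair) = 1
 *
 * A base-class node with one base-class neighbor and three pair-class
 * neighbors sums to 1*1 + 3*2 = 7 < p(base) = 8, so it is trivially
 * colorable no matter which registers those neighbors end up with.
 */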
#include <ralloc.h>

#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/bitset.h"
#include "register_allocate.h"

#define NO_REG ~0
struct ra_reg {
   bool *conflicts;
   unsigned int *conflict_list;
   unsigned int conflict_list_size;
   unsigned int num_conflicts;
};
struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};
struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.  This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in Runeson/Nyström
    * paper.  This is "how many registers of B could the worst choice
    * register from C conflict with".
    */
   unsigned int *q;
};
struct ra_node {
   /**
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;
   unsigned int *adjacency_list;
   unsigned int adjacency_list_size;
   unsigned int adjacency_count;

   unsigned int class;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * Set when the node is in the trivially colorable stack.  When
    * set, the adjacency to this node is ignored, to implement the
    * "remove the edge from the graph" in simplification without
    * having to actually modify the adjacency_list.
    */
   bool in_stack;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;
};
struct ra_graph {
   struct ra_regs *regs;
   /**
    * the variables that need register allocation.
    */
   struct ra_node *nodes;
   unsigned int count; /**< count of nodes. */

   unsigned int *stack;
   unsigned int stack_count;

   /**
    * Tracks the start of the set of optimistically-colored registers in the
    * stack.
    *
    * Along with any registers not in the stack (if one called ra_simplify()
    * and didn't do optimistic coloring), these need to be considered for
    * spilling.
    */
   unsigned int stack_optimistic_start;
};
/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, bool, count);
      regs->regs[i].conflicts[i] = true;

      regs->regs[i].conflict_list = ralloc_array(regs->regs, unsigned int, 4);
      regs->regs[i].conflict_list_size = 4;
      regs->regs[i].conflict_list[0] = i;
      regs->regs[i].num_conflicts = 1;
   }

   return regs;
}
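/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might build the register set for the hypothetical 8-register / 4-pair
 * layout described in the file comment.  The register and class numbering
 * here are assumptions chosen for the example only.
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 12);
 *    unsigned int base = ra_alloc_reg_class(regs);   // regs 0..7
 *    unsigned int pair = ra_alloc_reg_class(regs);   // regs 8..11
 *    unsigned int i;
 *
 *    for (i = 0; i < 8; i++)
 *       ra_class_add_reg(regs, base, i);
 *    for (i = 0; i < 4; i++) {
 *       ra_class_add_reg(regs, pair, 8 + i);
 *       // Each pair conflicts with the two base registers it covers.
 *       ra_add_transitive_reg_conflict(regs, 2 * i, 8 + i);
 *       ra_add_transitive_reg_conflict(regs, 2 * i + 1, 8 + i);
 *    }
 *
 *    ra_set_finalize(regs, NULL);   // NULL q_values: compute q(B,C) here
 */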
/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}
static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list_size == reg1->num_conflicts) {
      reg1->conflict_list_size *= 2;
      reg1->conflict_list = reralloc(regs->regs, reg1->conflict_list,
                                     unsigned int, reg1->conflict_list_size);
   }
   reg1->conflict_list[reg1->num_conflicts++] = r2;
   reg1->conflicts[r2] = true;
}
void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!regs->regs[r1].conflicts[r2]) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}
/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   unsigned int i;

   ra_add_reg_conflict(regs, reg, base_reg);

   for (i = 0; i < regs->regs[base_reg].num_conflicts; i++) {
      ra_add_reg_conflict(regs, reg, regs->regs[base_reg].conflict_list[i]);
   }
}
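/*
 * Illustrative sketch (an assumption for this comment, not code from the
 * original file): because the conflict walk above copies base_reg's existing
 * conflicts, wider aggregates can be layered on top of narrower ones.  For
 * example, a "quad" register (say register 12, assuming the set was created
 * large enough to hold it) covering pairs p0 (reg 8) and p1 (reg 9) from the
 * sketch after ra_alloc_reg_set() only needs:
 *
 *    ra_add_transitive_reg_conflict(regs, 8, 12);   // p0 -> quad
 *    ra_add_transitive_reg_conflict(regs, 9, 12);   // p1 -> quad
 *
 * which also picks up the quad's conflicts with r0-r3, instead of spelling
 * out every base-register conflict with ra_add_reg_conflict().
 */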
unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}
void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   BITSET_SET(class->regs, r);
   class->p++;
}
/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}
/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
      return;
   }

   /* Compute, for each class B and C, how many regs of B an
    * allocation to C could conflict with.
    */
   for (b = 0; b < regs->class_count; b++) {
      for (c = 0; c < regs->class_count; c++) {
         unsigned int rc;
         int max_conflicts = 0;

         for (rc = 0; rc < regs->count; rc++) {
            unsigned int i;
            int conflicts = 0;

            if (!reg_belongs_to_class(rc, regs->classes[c]))
               continue;

            for (i = 0; i < regs->regs[rc].num_conflicts; i++) {
               unsigned int rb = regs->regs[rc].conflict_list[i];
               if (BITSET_TEST(regs->classes[b]->regs, rb))
                  conflicts++;
            }
            max_conflicts = MAX2(max_conflicts, conflicts);
         }
         regs->classes[b]->q[c] = max_conflicts;
      }
   }
}
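/*
 * Illustrative sketch (an assumption, not code from the original file): a
 * driver that already knows its q values can skip the computation above.
 * For the hypothetical base/pair layout in the file comment, the table
 * worked out there could be passed in directly:
 *
 *    unsigned int q_base[] = { 1, 2 };   // q(base, base), q(base, pair)
 *    unsigned int q_pair[] = { 1, 1 };   // q(pair, base), q(pair, pair)
 *    unsigned int *q_values[] = { q_base, q_pair };
 *
 *    ra_set_finalize(regs, q_values);
 */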
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   if (g->nodes[n1].adjacency_count >=
       g->nodes[n1].adjacency_list_size) {
      g->nodes[n1].adjacency_list_size *= 2;
      g->nodes[n1].adjacency_list = reralloc(g, g->nodes[n1].adjacency_list,
                                             unsigned int,
                                             g->nodes[n1].adjacency_list_size);
   }

   g->nodes[n1].adjacency_list[g->nodes[n1].adjacency_count] = n2;
   g->nodes[n1].adjacency_count++;
}
struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;
   unsigned int i;

   g = rzalloc(regs, struct ra_graph);
   g->regs = regs;
   g->nodes = rzalloc_array(g, struct ra_node, count);
   g->count = count;

   g->stack = rzalloc_array(g, unsigned int, count);

   for (i = 0; i < count; i++) {
      int bitset_count = BITSET_WORDS(count);
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);

      g->nodes[i].adjacency_list_size = 4;
      g->nodes[i].adjacency_list =
         ralloc_array(g, unsigned int, g->nodes[i].adjacency_list_size);
      g->nodes[i].adjacency_count = 0;

      ra_add_node_adjacency(g, i, i);
      g->nodes[i].reg = NO_REG;
   }

   return g;
}
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   if (!BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}
static bool
pq_test(struct ra_graph *g, unsigned int n)
{
   unsigned int j;
   unsigned int q = 0;
   int n_class = g->nodes[n].class;

   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];
      unsigned int n2_class = g->nodes[n2].class;

      if (n != n2 && !g->nodes[n2].in_stack) {
         q += g->regs->classes[n_class]->q[n2_class];
      }
   }

   return q < g->regs->classes[n_class]->p;
}
/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * Returns true if all nodes were removed from the graph.  false
 * means that either spilling will be required, or optimistic coloring
 * should be applied.
 */
bool
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   int i;

   while (progress) {
      progress = false;

      for (i = g->count - 1; i >= 0; i--) {
         if (g->nodes[i].in_stack || g->nodes[i].reg != NO_REG)
            continue;

         if (pq_test(g, i)) {
            g->stack[g->stack_count] = i;
            g->stack_count++;
            g->nodes[i].in_stack = true;
            progress = true;
         }
      }
   }

   for (i = 0; i < g->count; i++) {
      if (!g->nodes[i].in_stack && g->nodes[i].reg == NO_REG)
         return false;
   }

   return true;
}
/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
bool
ra_select(struct ra_graph *g)
{
   int i;
   unsigned int ri;
   int start_search_reg = 0;

   while (g->stack_count != 0) {
      unsigned int r = -1;
      int n = g->stack[g->stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* Find the lowest-numbered reg which is not used by a member
       * of the graph adjacent to us.
       */
      for (ri = 0; ri < g->regs->count; ri++) {
         r = (start_search_reg + ri) % g->regs->count;
         if (!reg_belongs_to_class(r, c))
            continue;

         /* Check if any of our neighbors conflict with this register choice. */
         for (i = 0; i < g->nodes[n].adjacency_count; i++) {
            unsigned int n2 = g->nodes[n].adjacency_list[i];

            if (!g->nodes[n2].in_stack &&
                g->regs->regs[r].conflicts[g->nodes[n2].reg]) {
               break;
            }
         }
         if (i == g->nodes[n].adjacency_count)
            break;
      }
      if (ri == g->regs->count)
         return false;

      g->nodes[n].reg = r;
      g->nodes[n].in_stack = false;
      g->stack_count--;

      if (g->regs->round_robin)
         start_search_reg = r + 1;
   }

   return true;
}
/**
 * Optimistic register coloring: Just push the remaining nodes
 * on the stack.  They'll be colored first in ra_select(), and
 * if they succeed then the locally-colorable nodes are still
 * locally-colorable and the rest of the register allocation
 * will succeed.
 */
void
ra_optimistic_color(struct ra_graph *g)
{
   unsigned int i;

   g->stack_optimistic_start = g->stack_count;
   for (i = 0; i < g->count; i++) {
      if (g->nodes[i].in_stack || g->nodes[i].reg != NO_REG)
         continue;

      g->stack[g->stack_count] = i;
      g->stack_count++;
      g->nodes[i].in_stack = true;
   }
}
bool
ra_allocate_no_spills(struct ra_graph *g)
{
   if (!ra_simplify(g)) {
      ra_optimistic_color(g);
   }
   return ra_select(g);
}
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   return g->nodes[n].reg;
}
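/*
 * Illustrative end-to-end sketch (an assumption for this comment, not code
 * from the original file): once a register set has been finalized, a driver
 * would typically build an interference graph per shader, run the allocator,
 * and read back the assignments.  node_count, node_class[], interferes(),
 * and hw_reg[] are hypothetical driver-side data used only for this example.
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *    unsigned int i, j;
 *
 *    for (i = 0; i < node_count; i++)
 *       ra_set_node_class(g, i, node_class[i]);
 *    for (i = 0; i < node_count; i++)
 *       for (j = i + 1; j < node_count; j++)
 *          if (interferes(i, j))
 *             ra_add_node_interference(g, i, j);
 *
 *    if (!ra_allocate_no_spills(g)) {
 *       // Caller must spill something (see ra_get_best_spill_node() below)
 *       // and retry with a rebuilt graph.
 *    } else {
 *       for (i = 0; i < node_count; i++)
 *          hw_reg[i] = ra_get_node_reg(g, i);
 *    }
 */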
/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known to not conflict
 * with other forced register assignment (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].reg = reg;
   g->nodes[n].in_stack = false;
}
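/*
 * Illustrative sketch (an assumption, not code from the original file): a
 * node carrying shader input data that must live in hardware register 2
 * could be pinned before simplification, while every other node goes
 * through the normal class/interference path:
 *
 *    ra_set_node_reg(g, input_node, 2);   // input_node is hypothetical
 */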
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   unsigned int j;
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C).  This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];

      if (n != n2) {
         unsigned int n2_class = g->nodes[n2].class;
         benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                     g->regs->classes[n_class]->p);
      }
   }

   return benefit;
}
/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n, i;

   /* For any registers not in the stack to be colored, consider them for
    * spilling.  This will mostly collect nodes that were being optimistically
    * colored as part of ra_allocate_no_spills() if we didn't successfully
    * optimistically color.
    *
    * It also includes nodes not trivially colorable by ra_simplify() if it
    * was used directly instead of as part of ra_allocate_no_spills().
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0)
         continue;

      if (g->nodes[n].in_stack)
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   /* Also consider spilling any nodes that were set up to be optimistically
    * colored that we couldn't manage to color in ra_select().
    */
   for (i = g->stack_optimistic_start; i < g->stack_count; i++) {
      float cost, benefit;

      n = g->stack[i];
      cost = g->nodes[n].spill_cost;
      if (cost <= 0.0)
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}
/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}