700e351841b04bfbf7bc5e8d5bb5235781f8d6bc
[mesa.git] / src / mesa / program / register_allocate.c
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 */
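
/*
 * Example of how a caller might use this API (an illustrative sketch only;
 * NUM_HW_REGS, num_temps, interfere() and hw_reg[] are hypothetical names,
 * not part of this file).  Register-to-register conflicts, e.g. between
 * overlapping hardware registers, would additionally be declared with
 * ra_add_reg_conflict() before ra_set_finalize():
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(NUM_HW_REGS);
 *    unsigned int class = ra_alloc_reg_class(regs);
 *    unsigned int i, j;
 *
 *    for (i = 0; i < NUM_HW_REGS; i++)
 *       ra_class_add_reg(regs, class, i);
 *    ra_set_finalize(regs);
 *
 *    // Then, once per program being compiled:
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, num_temps);
 *
 *    for (i = 0; i < num_temps; i++) {
 *       ra_set_node_class(g, i, class);
 *       for (j = i + 1; j < num_temps; j++) {
 *          if (interfere(i, j))
 *             ra_add_node_interference(g, i, j);
 *       }
 *    }
 *
 *    if (ra_allocate_no_spills(g)) {
 *       for (i = 0; i < num_temps; i++)
 *          hw_reg[i] = ra_get_node_reg(g, i);
 *    }
 */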

#include <talloc.h>

#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "register_allocate.h"

struct ra_reg {
   GLboolean *conflicts;
   unsigned int *conflict_list;
   unsigned int conflict_list_size;
   unsigned int num_conflicts;
};

struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;
};

struct ra_class {
   GLboolean *regs;

   /**
    * p_B in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q_B,C in Runeson/Nyström paper.
    *
    * q[C] is how many registers of this class, B, an allocation to
    * class C could conflict with, at most.
    */
   unsigned int *q;
};

struct ra_node {
   GLboolean *adjacency;
   unsigned int *adjacency_list;
   unsigned int class;
   unsigned int adjacency_count;
   unsigned int reg;
   GLboolean in_stack;
   float spill_cost;
};

struct ra_graph {
   struct ra_regs *regs;
   /**
    * The variables that need register allocation.
    */
   struct ra_node *nodes;
   unsigned int count; /**< count of nodes. */

   unsigned int *stack;
   unsigned int stack_count;
};

struct ra_regs *
ra_alloc_reg_set(unsigned int count)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = talloc_zero(NULL, struct ra_regs);
   regs->count = count;
   regs->regs = talloc_zero_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = talloc_zero_array(regs->regs, GLboolean, count);
      regs->regs[i].conflicts[i] = GL_TRUE;

      regs->regs[i].conflict_list = talloc_array(regs->regs, unsigned int, 4);
      regs->regs[i].conflict_list_size = 4;
      regs->regs[i].conflict_list[0] = i;
      regs->regs[i].num_conflicts = 1;
   }

   return regs;
}

static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list_size == reg1->num_conflicts) {
      reg1->conflict_list_size *= 2;
      reg1->conflict_list = talloc_realloc(regs->regs,
                                           reg1->conflict_list,
                                           unsigned int,
                                           reg1->conflict_list_size);
   }
   reg1->conflict_list[reg1->num_conflicts++] = r2;
   reg1->conflicts[r2] = GL_TRUE;
}

void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!regs->regs[r1].conflicts[r2]) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}

unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = talloc_realloc(regs->regs, regs->classes,
                                  struct ra_class *,
                                  regs->class_count + 1);

   class = talloc_zero(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = talloc_zero_array(class, GLboolean, regs->count);

   return regs->class_count++;
}

void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   class->regs[r] = GL_TRUE;
   class->p++;
}

/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 */
void
ra_set_finalize(struct ra_regs *regs)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = talloc_array(regs, unsigned int, regs->class_count);
   }

   /* Compute, for each class B and C, how many regs of B an
    * allocation to C could conflict with.
    */
   for (b = 0; b < regs->class_count; b++) {
      for (c = 0; c < regs->class_count; c++) {
         unsigned int rc;
         int max_conflicts = 0;

         for (rc = 0; rc < regs->count; rc++) {
            int conflicts = 0;
            int i;

            if (!regs->classes[c]->regs[rc])
               continue;

            for (i = 0; i < regs->regs[rc].num_conflicts; i++) {
               unsigned int rb = regs->regs[rc].conflict_list[i];
               if (regs->classes[b]->regs[rb])
                  conflicts++;
            }
            max_conflicts = MAX2(max_conflicts, conflicts);
         }
         regs->classes[b]->q[c] = max_conflicts;
      }
   }
}
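
/*
 * Worked example (a hypothetical register file, not taken from this
 * codebase): expose hardware registers r0-r3 both as a class A of four
 * single registers {r0, r1, r2, r3} (p_A = 4) and as a class B of two
 * non-overlapping pairs {r0r1, r2r3} (p_B = 2), with each pair register
 * declared as conflicting with its two components via
 * ra_add_reg_conflict().  The loop above then computes q_A,B = 2 (one pair
 * can conflict with at most two singles), q_B,A = 1 (one single conflicts
 * with at most one pair), and q_A,A = q_B,B = 1 (a register always
 * conflicts with itself).
 */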

static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   g->nodes[n1].adjacency[n2] = GL_TRUE;
   g->nodes[n1].adjacency_list[g->nodes[n1].adjacency_count] = n2;
   g->nodes[n1].adjacency_count++;
}

struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;
   unsigned int i;

   g = talloc_zero(regs, struct ra_graph);
   g->regs = regs;
   g->nodes = talloc_zero_array(g, struct ra_node, count);
   g->count = count;

   g->stack = talloc_zero_array(g, unsigned int, count);

   for (i = 0; i < count; i++) {
      g->nodes[i].adjacency = talloc_zero_array(g, GLboolean, count);
      g->nodes[i].adjacency_list = talloc_array(g, unsigned int, count);
      g->nodes[i].adjacency_count = 0;
      ra_add_node_adjacency(g, i, i);
      g->nodes[i].reg = ~0;
   }

   return g;
}

void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}

void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   if (!g->nodes[n1].adjacency[n2]) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}

static GLboolean pq_test(struct ra_graph *g, unsigned int n)
{
   unsigned int j;
   unsigned int q = 0;
   int n_class = g->nodes[n].class;

   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];
      unsigned int n2_class = g->nodes[n2].class;

      if (n != n2 && !g->nodes[n2].in_stack) {
         q += g->regs->classes[n_class]->q[n2_class];
      }
   }

   return q < g->regs->classes[n_class]->p;
}
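
/*
 * Continuing the hypothetical example above: a class A node whose remaining
 * (not-in-stack) neighbors are one class B node and one class A node gets
 * q = q_A,B + q_A,A = 2 + 1 = 3 < p_A = 4, so it passes the test and is
 * trivially colorable.  With two class B neighbors instead, q = 2 + 2 = 4,
 * which is not < 4, so the test cannot guarantee a color for it.
 */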

/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * Returns GL_TRUE if all nodes were removed from the graph. GL_FALSE
 * means that either spilling will be required, or optimistic coloring
 * should be applied.
 */
GLboolean
ra_simplify(struct ra_graph *g)
{
   GLboolean progress = GL_TRUE;
   int i;

   while (progress) {
      progress = GL_FALSE;

      for (i = g->count - 1; i >= 0; i--) {
         if (g->nodes[i].in_stack)
            continue;

         if (pq_test(g, i)) {
            g->stack[g->stack_count] = i;
            g->stack_count++;
            g->nodes[i].in_stack = GL_TRUE;
            progress = GL_TRUE;
         }
      }
   }

   for (i = 0; i < g->count; i++) {
      if (!g->nodes[i].in_stack)
         return GL_FALSE;
   }

   return GL_TRUE;
}

/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed. If
 * not (optimistic coloring), then it may return GL_FALSE.
 */
GLboolean
ra_select(struct ra_graph *g)
{
   int i;

   while (g->stack_count != 0) {
      unsigned int r;
      int n = g->stack[g->stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* Find the lowest-numbered reg which is not used by a member
       * of the graph adjacent to us.
       */
      for (r = 0; r < g->regs->count; r++) {
         if (!c->regs[r])
            continue;

         /* Check if any of our neighbors conflict with this register choice. */
         for (i = 0; i < g->nodes[n].adjacency_count; i++) {
            unsigned int n2 = g->nodes[n].adjacency_list[i];

            if (!g->nodes[n2].in_stack &&
                g->regs->regs[r].conflicts[g->nodes[n2].reg]) {
               break;
            }
         }
         if (i == g->nodes[n].adjacency_count)
            break;
      }
      if (r == g->regs->count)
         return GL_FALSE;

      g->nodes[n].reg = r;
      g->nodes[n].in_stack = GL_FALSE;
      g->stack_count--;
   }

   return GL_TRUE;
}

/**
 * Optimistic register coloring: Just push the remaining nodes
 * on the stack. They'll be colored first in ra_select(), and
 * if they succeed then the locally-colorable nodes are still
 * locally-colorable and the rest of the register allocation
 * will succeed.
 */
void
ra_optimistic_color(struct ra_graph *g)
{
   unsigned int i;

   for (i = 0; i < g->count; i++) {
      if (g->nodes[i].in_stack)
         continue;

      g->stack[g->stack_count] = i;
      g->stack_count++;
      g->nodes[i].in_stack = GL_TRUE;
   }
}

GLboolean
ra_allocate_no_spills(struct ra_graph *g)
{
   if (!ra_simplify(g)) {
      ra_optimistic_color(g);
   }
   return ra_select(g);
}

unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   return g->nodes[n].reg;
}

static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   int j;
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n, n2
    * through spilling as q(C, B) / p(C). This is similar to the
    * "count number of edges" approach of traditional graph coloring,
    * but takes classes into account.
    */
   for (j = 0; j < g->nodes[n].adjacency_count; j++) {
      unsigned int n2 = g->nodes[n].adjacency_list[j];
      if (n != n2) {
         unsigned int n2_class = g->nodes[n2].class;
         benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                     g->regs->classes[n_class]->p);
      }
   }

   return benefit;
}
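
/*
 * In the hypothetical example above, spilling a class A node with one
 * class B neighbor and one class A neighbor has a benefit of
 * q_A,B / p_A + q_A,A / p_A = 2/4 + 1/4 = 0.75.
 */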

/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0)
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}

/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}
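
/*
 * Typical allocate-or-spill loop a caller might build on top of this API
 * (an illustrative sketch; build_interference_graph(), estimate_spill_cost()
 * and spill_temporary() are hypothetical helpers the caller would provide):
 *
 *    for (;;) {
 *       struct ra_graph *g = build_interference_graph(regs, num_temps);
 *       unsigned int i;
 *       int n;
 *
 *       for (i = 0; i < num_temps; i++)
 *          ra_set_node_spill_cost(g, i, estimate_spill_cost(i));
 *
 *       if (ra_allocate_no_spills(g))
 *          break;              // success: read results with ra_get_node_reg()
 *
 *       n = ra_get_best_spill_node(g);
 *       if (n == -1)
 *          break;              // nothing spillable: allocation failed
 *
 *       spill_temporary(n);    // rewrite the program, then rebuild the graph
 *    }
 */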