0daeb5c526d87ae838d8136afb76c97cbee144af
[mesa.git] / src / gallium / drivers / lima / ir / pp / regalloc.c
1 /*
2 * Copyright (c) 2017 Lima Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25 #include "util/ralloc.h"
26 #include "util/register_allocate.h"
27 #include "util/u_debug.h"
28
29 #include "ppir.h"
30 #include "lima_context.h"
31
/* Number of full 4-component physical registers available to the
 * register allocator. */
#define PPIR_FULL_REG_NUM  6

/* Each vecN class enumerates every possible placement of an
 * N-component virtual register inside a 4-component physical
 * register, across all PPIR_FULL_REG_NUM registers. */
#define PPIR_VEC1_REG_NUM       (PPIR_FULL_REG_NUM * 4) /* x, y, z, w */
#define PPIR_VEC2_REG_NUM       (PPIR_FULL_REG_NUM * 3) /* xy, yz, zw */
#define PPIR_VEC3_REG_NUM       (PPIR_FULL_REG_NUM * 2) /* xyz, yzw */
#define PPIR_VEC4_REG_NUM       PPIR_FULL_REG_NUM       /* xyzw */
/* head_vecN classes only allow placement starting at component x
 * (one candidate per physical register). */
#define PPIR_HEAD_VEC1_REG_NUM  PPIR_FULL_REG_NUM       /* x */
#define PPIR_HEAD_VEC2_REG_NUM  PPIR_FULL_REG_NUM       /* xy */
#define PPIR_HEAD_VEC3_REG_NUM  PPIR_FULL_REG_NUM       /* xyz */
#define PPIR_HEAD_VEC4_REG_NUM  PPIR_FULL_REG_NUM       /* xyzw */

/* The classes are laid out back-to-back in one flat RA register
 * space; each base is the running sum of the previous class sizes. */
#define PPIR_VEC1_REG_BASE       0
#define PPIR_VEC2_REG_BASE       (PPIR_VEC1_REG_BASE + PPIR_VEC1_REG_NUM)
#define PPIR_VEC3_REG_BASE       (PPIR_VEC2_REG_BASE + PPIR_VEC2_REG_NUM)
#define PPIR_VEC4_REG_BASE       (PPIR_VEC3_REG_BASE + PPIR_VEC3_REG_NUM)
#define PPIR_HEAD_VEC1_REG_BASE  (PPIR_VEC4_REG_BASE + PPIR_VEC4_REG_NUM)
#define PPIR_HEAD_VEC2_REG_BASE  (PPIR_HEAD_VEC1_REG_BASE + PPIR_HEAD_VEC1_REG_NUM)
#define PPIR_HEAD_VEC3_REG_BASE  (PPIR_HEAD_VEC2_REG_BASE + PPIR_HEAD_VEC2_REG_NUM)
#define PPIR_HEAD_VEC4_REG_BASE  (PPIR_HEAD_VEC3_REG_BASE + PPIR_HEAD_VEC3_REG_NUM)
#define PPIR_REG_COUNT           (PPIR_HEAD_VEC4_REG_BASE + PPIR_HEAD_VEC4_REG_NUM)

enum ppir_ra_reg_class {
   ppir_ra_reg_class_vec1,
   ppir_ra_reg_class_vec2,
   ppir_ra_reg_class_vec3,
   ppir_ra_reg_class_vec4,

   /* 4 reg class for load/store instr regs:
    * load/store instr has no swizzle field, so the (virtual) register
    * must be allocated at the beginning of a (physical) register,
    */
   ppir_ra_reg_class_head_vec1,
   ppir_ra_reg_class_head_vec2,
   ppir_ra_reg_class_head_vec3,
   ppir_ra_reg_class_head_vec4,

   ppir_ra_reg_class_num,
};

/* First RA register index of each class; the extra
 * [ppir_ra_reg_class_num] entry is a sentinel holding the total
 * count, so base[i + 1] always bounds class i. */
static const int ppir_ra_reg_base[ppir_ra_reg_class_num + 1] = {
   [ppir_ra_reg_class_vec1]       = PPIR_VEC1_REG_BASE,
   [ppir_ra_reg_class_vec2]       = PPIR_VEC2_REG_BASE,
   [ppir_ra_reg_class_vec3]       = PPIR_VEC3_REG_BASE,
   [ppir_ra_reg_class_vec4]       = PPIR_VEC4_REG_BASE,
   [ppir_ra_reg_class_head_vec1]  = PPIR_HEAD_VEC1_REG_BASE,
   [ppir_ra_reg_class_head_vec2]  = PPIR_HEAD_VEC2_REG_BASE,
   [ppir_ra_reg_class_head_vec3]  = PPIR_HEAD_VEC3_REG_BASE,
   [ppir_ra_reg_class_head_vec4]  = PPIR_HEAD_VEC4_REG_BASE,
   [ppir_ra_reg_class_num]        = PPIR_REG_COUNT,
};

/* q values passed to ra_set_finalize: q[c1][c2] is the number of
 * registers of class c1 that a single register of class c2 can
 * conflict with (see util/register_allocate.c). */
static unsigned int *
ppir_ra_reg_q_values[ppir_ra_reg_class_num] = {
   (unsigned int []) {1, 2, 3, 4, 1, 2, 3, 4},
   (unsigned int []) {2, 3, 3, 3, 1, 2, 3, 3},
   (unsigned int []) {2, 2, 2, 2, 1, 2, 2, 2},
   (unsigned int []) {1, 1, 1, 1, 1, 1, 1, 1},
   (unsigned int []) {1, 1, 1, 1, 1, 1, 1, 1},
   (unsigned int []) {1, 1, 1, 1, 1, 1, 1, 1},
   (unsigned int []) {1, 1, 1, 1, 1, 1, 1, 1},
   (unsigned int []) {1, 1, 1, 1, 1, 1, 1, 1},
};
94
/* Build the shared RA register set: one RA register per possible
 * placement of a vec1..vec4 (and head_vec1..head_vec4) value, with
 * conflicts between any two placements that overlap the same
 * physical components. Returns NULL on allocation failure. */
struct ra_regs *ppir_regalloc_init(void *mem_ctx)
{
   struct ra_regs *ret = ra_alloc_reg_set(mem_ctx, PPIR_REG_COUNT, false);
   if (!ret)
      return NULL;

   /* number of placements of each class within one physical register:
    * (x, y, z, w) (xy, yz, zw) (xyz, yzw) (xyzw) (x) (xy) (xyz) (xyzw) */
   static const int class_reg_num[ppir_ra_reg_class_num] = {
      4, 3, 2, 1, 1, 1, 1, 1,
   };
   /* base reg (x, y, z, w) confliction with other regs:
    * for each single component h of each physical register, mark a
    * conflict with every placement (class i, offset j) whose
    * component mask covers h. */
   for (int h = 0; h < 4; h++) {
      int base_reg_mask = 1 << h;
      for (int i = 1; i < ppir_ra_reg_class_num; i++) {
         /* (i % 4) + 1 components => low bits set in the mask;
          * head classes (i >= 4) reuse the same widths */
         int class_reg_base_mask = (1 << ((i % 4) + 1)) - 1;
         for (int j = 0; j < class_reg_num[i]; j++) {
            if (base_reg_mask & (class_reg_base_mask << j)) {
               for (int k = 0; k < PPIR_FULL_REG_NUM; k++) {
                  ra_add_reg_conflict(ret, k * 4 + h,
                     ppir_ra_reg_base[i] + k * class_reg_num[i] + j);
               }
            }
         }
      }
   }
   /* build all other confliction by the base reg confliction */
   for (int i = 0; i < PPIR_VEC1_REG_NUM; i++)
      ra_make_reg_conflicts_transitive(ret, i);

   /* allocate class handles; indices are assumed to be sequential
    * starting from 0, matching enum ppir_ra_reg_class */
   for (int i = 0; i < ppir_ra_reg_class_num; i++)
      ra_alloc_reg_class(ret);

   /* assign each RA register to its class using the base table */
   int reg_index = 0;
   for (int i = 0; i < ppir_ra_reg_class_num; i++) {
      while (reg_index < ppir_ra_reg_base[i + 1])
         ra_class_add_reg(ret, i, reg_index++);
   }

   ra_set_finalize(ret, ppir_ra_reg_q_values);
   return ret;
}
136
137 static void ppir_regalloc_update_reglist_ssa(ppir_compiler *comp)
138 {
139 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
140 list_for_each_entry(ppir_node, node, &block->node_list, list) {
141 if (node->op == ppir_op_store_color)
142 continue;
143
144 if (!node->instr || node->op == ppir_op_const)
145 continue;
146
147 ppir_dest *dest = ppir_node_get_dest(node);
148 if (dest) {
149 ppir_reg *reg = NULL;
150
151 if (dest->type == ppir_target_ssa) {
152 reg = &dest->ssa;
153 list_addtail(&reg->list, &comp->reg_list);
154 }
155 }
156 }
157 }
158 }
159
160 static int get_phy_reg_index(int reg)
161 {
162 int i;
163
164 for (i = 0; i < ppir_ra_reg_class_num; i++) {
165 if (reg < ppir_ra_reg_base[i + 1]) {
166 reg -= ppir_ra_reg_base[i];
167 break;
168 }
169 }
170
171 if (i < ppir_ra_reg_class_head_vec1)
172 return reg / (4 - i) * 4 + reg % (4 - i);
173 else
174 return reg * 4;
175 }
176
177 static void ppir_regalloc_print_result(ppir_compiler *comp)
178 {
179 printf("======ppir regalloc result======\n");
180 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
181 list_for_each_entry(ppir_instr, instr, &block->instr_list, list) {
182 printf("%03d:", instr->index);
183 for (int i = 0; i < PPIR_INSTR_SLOT_NUM; i++) {
184 ppir_node *node = instr->slots[i];
185 if (!node)
186 continue;
187
188 printf(" (%d|", node->index);
189
190 ppir_dest *dest = ppir_node_get_dest(node);
191 if (dest)
192 printf("%d", ppir_target_get_dest_reg_index(dest));
193
194 printf("|");
195
196 for (int i = 0; i < ppir_node_get_src_num(node); i++) {
197 if (i)
198 printf(" ");
199 printf("%d", ppir_target_get_src_reg_index(ppir_node_get_src(node, i)));
200 }
201
202 printf(")");
203 }
204 printf("\n");
205 }
206 }
207 printf("--------------------------\n");
208 }
209
210 static bool create_new_instr_after(ppir_block *block, ppir_instr *ref,
211 ppir_node *node)
212 {
213 ppir_instr *newinstr = ppir_instr_create(block);
214 if (unlikely(!newinstr))
215 return false;
216
217 list_del(&newinstr->list);
218 list_add(&newinstr->list, &ref->list);
219
220 if (!ppir_instr_insert_node(newinstr, node))
221 return false;
222
223 list_for_each_entry_from(ppir_instr, instr, ref, &block->instr_list, list) {
224 instr->seq++;
225 }
226 newinstr->seq = ref->seq+1;
227 newinstr->scheduled = true;
228 return true;
229 }
230
231 static bool create_new_instr_before(ppir_block *block, ppir_instr *ref,
232 ppir_node *node)
233 {
234 ppir_instr *newinstr = ppir_instr_create(block);
235 if (unlikely(!newinstr))
236 return false;
237
238 list_del(&newinstr->list);
239 list_addtail(&newinstr->list, &ref->list);
240
241 if (!ppir_instr_insert_node(newinstr, node))
242 return false;
243
244 list_for_each_entry_from(ppir_instr, instr, ref, &block->instr_list, list) {
245 instr->seq++;
246 }
247 newinstr->seq = ref->seq-1;
248 newinstr->scheduled = true;
249 return true;
250 }
251
/* Rewrite a source that reads the spilled register 'chosen' to read a
 * freshly filled value instead. The fill is a load_temp from the
 * current stack slot, either merged into node's own instruction (via
 * the uniform pipeline reg) or, if that slot is taken, emitted as a
 * new instruction plus a mov into a new SSA temp. The node performing
 * the fill is cached in *fill_node so further sources of the same
 * node reuse it. Returns false on allocation/scheduling failure. */
static bool ppir_update_spilled_src(ppir_compiler *comp, ppir_block *block,
                                    ppir_node *node, ppir_src *src,
                                    ppir_node **fill_node)
{
   /* nodes might have multiple references to the same value.
    * avoid creating unnecessary loads for the same fill by
    * saving the node resulting from the temporary load */
   if (*fill_node)
      goto update_src;

   int num_components = src->reg->num_components;

   /* alloc new node to load value */
   ppir_node *load_node = ppir_node_create(block, ppir_op_load_temp, -1, 0);
   if (!load_node)
      return false;
   list_addtail(&load_node->list, &node->list);
   comp->num_fills++;

   ppir_load_node *load = ppir_node_to_load(load_node);

   load->index = -comp->prog->stack_size; /* index sizes are negative */
   load->num_components = num_components;

   /* the load lands in the uniform pipeline register, not a real reg */
   ppir_dest *ld_dest = &load->dest;
   ld_dest->type = ppir_target_pipeline;
   ld_dest->pipeline = ppir_pipeline_reg_uniform;
   ld_dest->write_mask = u_bit_consecutive(0, num_components);

   /* If the uniform slot is empty, we can insert the load_temp
    * there and use it directly. Exceptionally, if the node is in the
    * varying or texld slot, this doesn't work. */
   if (!node->instr->slots[PPIR_INSTR_SLOT_UNIFORM] &&
       node->instr_pos != PPIR_INSTR_SLOT_VARYING &&
       node->instr_pos != PPIR_INSTR_SLOT_TEXLD) {
      ppir_node_target_assign(src, load_node);
      *fill_node = load_node;
      return ppir_instr_insert_node(node->instr, load_node);
   }

   /* Uniform slot was taken, so fall back to a new instruction with a mov */
   if (!create_new_instr_before(block, node->instr, load_node))
      return false;

   /* Create move node */
   ppir_node *move_node = ppir_node_create(block, ppir_op_mov, -1 , 0);
   if (unlikely(!move_node))
      return false;
   list_addtail(&move_node->list, &node->list);

   ppir_alu_node *move_alu = ppir_node_to_alu(move_node);

   /* mov reads the uniform pipeline reg with identity swizzle */
   move_alu->num_src = 1;
   move_alu->src->type = ppir_target_pipeline;
   move_alu->src->pipeline = ppir_pipeline_reg_uniform;
   for (int i = 0; i < 4; i++)
      move_alu->src->swizzle[i] = i;

   /* the mov writes a new SSA value; mark it spilled so it is never
    * chosen for spilling again, and give it empty liveness so the
    * next liveness pass recomputes it */
   ppir_dest *alu_dest = &move_alu->dest;
   alu_dest->type = ppir_target_ssa;
   alu_dest->ssa.num_components = num_components;
   alu_dest->ssa.live_in = INT_MAX;
   alu_dest->ssa.live_out = 0;
   alu_dest->ssa.spilled = true;
   alu_dest->write_mask = u_bit_consecutive(0, num_components);

   list_addtail(&alu_dest->ssa.list, &comp->reg_list);

   if (!ppir_instr_insert_node(load_node->instr, move_node))
      return false;

   /* insert the new node as predecessor */
   ppir_node_foreach_pred_safe(node, dep) {
      ppir_node *pred = dep->pred;
      ppir_node_remove_dep(dep);
      ppir_node_add_dep(load_node, pred, ppir_dep_src);
   }
   ppir_node_add_dep(node, move_node, ppir_dep_src);
   ppir_node_add_dep(move_node, load_node, ppir_dep_src);

   *fill_node = move_node;

update_src:
   /* switch node src to use the fill node dest */
   ppir_node_target_assign(src, *fill_node);

   return true;
}
340
341 static bool ppir_update_spilled_dest_load(ppir_compiler *comp, ppir_block *block,
342 ppir_node *node)
343 {
344 ppir_dest *dest = ppir_node_get_dest(node);
345 assert(dest != NULL);
346 assert(dest->type == ppir_target_register);
347 ppir_reg *reg = dest->reg;
348 int num_components = reg->num_components;
349
350 /* alloc new node to load value */
351 ppir_node *load_node = ppir_node_create(block, ppir_op_load_temp, -1, 0);
352 if (!load_node)
353 return NULL;
354 list_addtail(&load_node->list, &node->list);
355 comp->num_fills++;
356
357 ppir_load_node *load = ppir_node_to_load(load_node);
358
359 load->index = -comp->prog->stack_size; /* index sizes are negative */
360 load->num_components = num_components;
361
362 load->dest.type = ppir_target_pipeline;
363 load->dest.pipeline = ppir_pipeline_reg_uniform;
364 load->dest.write_mask = u_bit_consecutive(0, num_components);
365
366 /* New instruction is needed since we're updating a dest register
367 * and we can't write to the uniform pipeline reg */
368 if (!create_new_instr_before(block, node->instr, load_node))
369 return false;
370
371 /* Create move node */
372 ppir_node *move_node = ppir_node_create(block, ppir_op_mov, -1 , 0);
373 if (unlikely(!move_node))
374 return false;
375 list_addtail(&move_node->list, &node->list);
376
377 ppir_alu_node *move_alu = ppir_node_to_alu(move_node);
378
379 move_alu->num_src = 1;
380 move_alu->src->type = ppir_target_pipeline;
381 move_alu->src->pipeline = ppir_pipeline_reg_uniform;
382 for (int i = 0; i < 4; i++)
383 move_alu->src->swizzle[i] = i;
384
385 move_alu->dest.type = ppir_target_register;
386 move_alu->dest.reg = reg;
387 move_alu->dest.write_mask = u_bit_consecutive(0, num_components);
388
389 if (!ppir_instr_insert_node(load_node->instr, move_node))
390 return false;
391
392 ppir_node_foreach_pred_safe(node, dep) {
393 ppir_node *pred = dep->pred;
394 ppir_node_remove_dep(dep);
395 ppir_node_add_dep(load_node, pred, ppir_dep_src);
396 }
397 ppir_node_add_dep(node, move_node, ppir_dep_src);
398 ppir_node_add_dep(move_node, load_node, ppir_dep_src);
399
400 return true;
401 }
402
/* After a node that writes the spilled register, insert a store_temp
 * that spills the value to the stack slot — merged into the node's
 * own instruction when the store_temp slot is free, otherwise in a
 * new instruction right after it. Returns false on failure. */
static bool ppir_update_spilled_dest(ppir_compiler *comp, ppir_block *block,
                                     ppir_node *node)
{
   ppir_dest *dest = ppir_node_get_dest(node);
   assert(dest != NULL);
   ppir_reg *reg = ppir_dest_get_reg(dest);

   /* alloc new node to store value */
   ppir_node *store_node = ppir_node_create(block, ppir_op_store_temp, -1, 0);
   if (!store_node)
      return false;
   list_addtail(&store_node->list, &node->list);
   comp->num_spills++;

   ppir_store_node *store = ppir_node_to_store(store_node);

   store->index = -comp->prog->stack_size; /* index sizes are negative */
   store->num_components = reg->num_components;

   /* the store reads back whatever the node just wrote */
   store->src.type = dest->type;
   store->src.reg = reg;

   /* insert the new node as successor: everything that depended on
    * 'node' now depends on the store instead */
   ppir_node_foreach_succ_safe(node, dep) {
      ppir_node *succ = dep->succ;
      ppir_node_remove_dep(dep);
      ppir_node_add_dep(succ, store_node, ppir_dep_src);
   }
   ppir_node_add_dep(store_node, node, ppir_dep_src);

   /* If the store temp slot is empty, we can insert the store_temp
    * there and use it directly. Exceptionally, if the node is in the
    * combine slot, this doesn't work. */
   if (!node->instr->slots[PPIR_INSTR_SLOT_STORE_TEMP] &&
       node->instr_pos != PPIR_INSTR_SLOT_ALU_COMBINE)
      return ppir_instr_insert_node(node->instr, store_node);

   /* Not possible to merge store, so fall back to a new instruction */
   return create_new_instr_after(block, node->instr, store_node);
}
443
444 static bool ppir_regalloc_spill_reg(ppir_compiler *comp, ppir_reg *chosen)
445 {
446 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
447 list_for_each_entry(ppir_node, node, &block->node_list, list) {
448
449 ppir_dest *dest = ppir_node_get_dest(node);
450 if (dest && ppir_dest_get_reg(dest) == chosen) {
451 /* If dest is a register, it might be updating only some its
452 * components, so need to load the existing value first */
453 if (dest->type == ppir_target_register) {
454 if (!ppir_update_spilled_dest_load(comp, block, node))
455 return false;
456 }
457 if (!ppir_update_spilled_dest(comp, block, node))
458 return false;
459 }
460
461 ppir_node *fill_node = NULL;
462 /* nodes might have multiple references to the same value.
463 * avoid creating unnecessary loads for the same fill by
464 * saving the node resulting from the temporary load */
465 for (int i = 0; i < ppir_node_get_src_num(node); i++) {
466 ppir_src *src = ppir_node_get_src(node, i);
467 ppir_reg *reg = ppir_src_get_reg(src);
468 if (reg == chosen) {
469 if (!ppir_update_spilled_src(comp, block, node, src, &fill_node))
470 return false;
471 }
472 }
473 }
474 }
475
476 return true;
477 }
478
479 static ppir_reg *ppir_regalloc_choose_spill_node(ppir_compiler *comp,
480 struct ra_graph *g)
481 {
482 float spill_costs[list_length(&comp->reg_list)];
483 /* experimentally determined, it seems to be worth scaling cost of
484 * regs in instructions that have used uniform/store_temp slots,
485 * but not too much as to offset the num_components base cost. */
486 const float slot_scale = 1.1f;
487
488 list_for_each_entry(ppir_reg, reg, &comp->reg_list, list) {
489 if (reg->spilled || reg->live_out == INT_MAX) {
490 /* not considered for spilling */
491 spill_costs[reg->regalloc_index] = 0.0f;
492 continue;
493 }
494
495 /* It is beneficial to spill registers with higher component number,
496 * so increase the cost of spilling registers with few components */
497 float spill_cost = 4.0f / (float)reg->num_components;
498 spill_costs[reg->regalloc_index] = spill_cost;
499 }
500
501 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
502 list_for_each_entry(ppir_instr, instr, &block->instr_list, list) {
503 if (instr->slots[PPIR_INSTR_SLOT_UNIFORM]) {
504 for (int i = 0; i < PPIR_INSTR_SLOT_NUM; i++) {
505 ppir_node *node = instr->slots[i];
506 if (!node)
507 continue;
508 for (int j = 0; j < ppir_node_get_src_num(node); j++) {
509 ppir_src *src = ppir_node_get_src(node, j);
510 if (!src)
511 continue;
512 ppir_reg *reg = ppir_src_get_reg(src);
513 if (!reg)
514 continue;
515
516 spill_costs[reg->regalloc_index] *= slot_scale;
517 }
518 }
519 }
520 if (instr->slots[PPIR_INSTR_SLOT_STORE_TEMP]) {
521 for (int i = 0; i < PPIR_INSTR_SLOT_NUM; i++) {
522 ppir_node *node = instr->slots[i];
523 if (!node)
524 continue;
525 ppir_dest *dest = ppir_node_get_dest(node);
526 if (!dest)
527 continue;
528 ppir_reg *reg = ppir_dest_get_reg(dest);
529 if (!reg)
530 continue;
531
532 spill_costs[reg->regalloc_index] *= slot_scale;
533 }
534 }
535 }
536 }
537
538 for (int i = 0; i < list_length(&comp->reg_list); i++)
539 ra_set_node_spill_cost(g, i, spill_costs[i]);
540
541 int r = ra_get_best_spill_node(g);
542 if (r == -1)
543 return NULL;
544
545 ppir_reg *chosen = NULL;
546 int i = 0;
547 list_for_each_entry(ppir_reg, reg, &comp->reg_list, list) {
548 if (i++ == r) {
549 chosen = reg;
550 break;
551 }
552 }
553 assert(chosen);
554 chosen->spilled = true;
555 chosen->is_head = true; /* store_temp unable to do swizzle */
556
557 return chosen;
558 }
559
560 static void ppir_regalloc_reset_liveness_info(ppir_compiler *comp)
561 {
562 int bitset_words = BITSET_WORDS(list_length(&comp->reg_list));
563 int idx = 0;
564
565 list_for_each_entry(ppir_reg, reg, &comp->reg_list, list) {
566 reg->live_in = INT_MAX;
567 reg->live_out = 0;
568 reg->regalloc_index = idx++;
569 }
570
571 list_for_each_entry(ppir_block, block, &comp->block_list, list) {
572 if (block->def)
573 ralloc_free(block->def);
574 block->def = rzalloc_array(comp, BITSET_WORD, bitset_words);
575
576 if (block->use)
577 ralloc_free(block->use);
578 block->use = rzalloc_array(comp, BITSET_WORD, bitset_words);
579
580 if (block->live_in)
581 ralloc_free(block->live_in);
582 block->live_in = rzalloc_array(comp, BITSET_WORD, bitset_words);
583
584 if (block->live_out)
585 ralloc_free(block->live_out);
586 block->live_out = rzalloc_array(comp, BITSET_WORD, bitset_words);
587 }
588 }
589
590 int lima_ppir_force_spilling = 0;
591
/* One register-allocation attempt: run liveness analysis, build the
 * interference graph, and try to allocate. On failure (or when
 * force_spilling is active) pick and spill one register, set *spilled,
 * and return false so the caller retries. Returns true on success. */
static bool ppir_regalloc_prog_try(ppir_compiler *comp, bool *spilled)
{
   ppir_regalloc_reset_liveness_info(comp);

   ppir_liveness_analysis(comp);

   struct ra_graph *g = ra_alloc_interference_graph(
      comp->ra, list_length(&comp->reg_list));

   /* RA node i corresponds to the i-th register in comp->reg_list;
    * class is picked by width, shifted by 4 for head (no-swizzle) regs */
   int n = 0;
   list_for_each_entry(ppir_reg, reg, &comp->reg_list, list) {
      int c = ppir_ra_reg_class_vec1 + (reg->num_components - 1);
      if (reg->is_head)
         c += 4;
      ra_set_node_class(g, n++, c);
   }

   /* add interference for every pair whose live ranges overlap */
   int n1 = 0;
   list_for_each_entry(ppir_reg, reg1, &comp->reg_list, list) {
      int n2 = n1 + 1;
      list_for_each_entry_from(ppir_reg, reg2, reg1->list.next,
                               &comp->reg_list, list) {
         bool interference = false;

         if (reg1->undef || reg2->undef)
            interference = false;
         else if (reg1->live_in < reg2->live_in) {
            if (reg1->live_out > reg2->live_in)
               interference = true;
         }
         else if (reg1->live_in > reg2->live_in) {
            if (reg2->live_out > reg1->live_in)
               interference = true;
         }
         else
            /* identical live_in: ranges start together, so overlap */
            interference = true;

         if (interference)
            ra_add_node_interference(g, n1, n2);

         n2++;
      }
      n1++;
   }

   *spilled = false;
   bool ok = ra_allocate(g);
   if (!ok || (comp->force_spilling-- > 0)) {
      ppir_reg *chosen = ppir_regalloc_choose_spill_node(comp, g);
      if (chosen) {
         /* stack_size will be used to assemble the frame reg in lima_draw.
          * It is also be used in the spilling code, as negative indices
          * starting from -1, to create stack addresses. */
         comp->prog->stack_size++;
         if (!ppir_regalloc_spill_reg(comp, chosen))
            goto err_out;
         /* Ask the outer loop to call back in. */
         *spilled = true;

         ppir_debug("spilled register %d/%d, num_components: %d\n",
                    chosen->regalloc_index, list_length(&comp->reg_list),
                    chosen->num_components);
         goto err_out;
      }

      ppir_error("regalloc fail\n");
      goto err_out;
   }

   /* allocation succeeded: translate RA indices to physical ones */
   n = 0;
   list_for_each_entry(ppir_reg, reg, &comp->reg_list, list) {
      int reg_index = ra_get_node_reg(g, n++);
      reg->index = get_phy_reg_index(reg_index);
   }

   ralloc_free(g);

   if (lima_debug & LIMA_DEBUG_PP)
      ppir_regalloc_print_result(comp);

   return true;

err_out:
   ralloc_free(g);
   return false;
}
678
679 bool ppir_regalloc_prog(ppir_compiler *comp)
680 {
681 bool spilled = false;
682 comp->prog->stack_size = 0;
683
684 /* Set from an environment variable to force spilling
685 * for debugging purposes, see lima_screen.c */
686 comp->force_spilling = lima_ppir_force_spilling;
687
688 ppir_regalloc_update_reglist_ssa(comp);
689
690 /* No registers? Probably shader consists of discard instruction */
691 if (list_empty(&comp->reg_list))
692 return true;
693
694 /* this will most likely succeed in the first
695 * try, except for very complicated shaders */
696 while (!ppir_regalloc_prog_try(comp, &spilled))
697 if (!spilled)
698 return false;
699
700 return true;
701 }