nir: Add a couple quick-and-dirty out-of-SSA helpers
src/compiler/nir/nir_from_ssa.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_vla.h"

/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */

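/*
 * At a high level, nir_convert_from_ssa_impl (below) proceeds in phases:
 * parallel copies are added at block boundaries to isolate phis, phi webs
 * are coalesced into merge sets, SSA defs are rewritten to registers, and
 * finally the parallel copies are lowered to movs.
 */
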
struct from_ssa_state {
   void *mem_ctx;
   void *dead_ctx;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
   nir_function_impl *impl;
};

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->live_index == 0) {
      /* SSA undefs always dominate */
      return true;
   } else if (b->live_index < a->live_index) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return a->live_index <= b->live_index;
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}


/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers. This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al. but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure. The merge_set stores a linked list of
 * merge_nodes in dominance order of the SSA definitions. (Since the
 * liveness analysis pass indexes the SSA values in dominance order for us,
 * this is an easy invariant to maintain.) It is assumed that no pair of the
 * nodes in a given set interfere. Merging two sets or checking for
 * interference can be done in a single linear-time merge-sort walk of the
 * two lists of nodes.
 */
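/* A small worked example (hypothetical value names): suppose set A holds
 * {ssa_1, ssa_4} and set B holds {ssa_2, ssa_3}, with live_index equal to
 * the number in each name. A merge-sort walk visits ssa_1, ssa_2, ssa_3,
 * ssa_4 in that order, so checking each visited value only against its
 * nearest dominating ancestor on a stack is enough to detect interference
 * between the two sets, and splicing the two lists in that same order
 * produces the merged set.
 */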
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_ssa_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   nir_register *reg;
} merge_set;

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   nir_ssa_def *dom[set->size];
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, "  ");

      if (node->def->name)
         fprintf(fp, "ssa_%d /* %s */\n", node->def->index, node->def->name);
      else
         fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = ralloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->reg = NULL;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   return nir_ssa_defs_interfere(a->def, b->def);
}

/* Merges b into a, maintaining the dominance (live_index) ordering of the
 * nodes. After this call, every node lives in a and b is left empty.
 */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          a_node->def->live_index > b_node->def->live_index) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;

   return a;
}

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   /* Stack of the dominating ancestors of the current node */
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      /* Take the next node from whichever list comes first in dominance
       * (live_index) order, merge-sort style.
       */
      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (a_node->def->live_index <= b_node->def->live_index) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      /* Pop anything off the stack that doesn't dominate the current node,
       * then check the current node against its nearest dominating
       * ancestor.
       */
      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}

static bool
add_parallel_copy_to_end_of_block(nir_block *block, void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(dead_ctx);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks. For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values. We also add a parallel copy to the end of every block that has
 * a successor with phi nodes; for each phi node in each successor, it
 * copies the corresponding source of the phi node, and we adjust the phi
 * to use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition. What this does is
 * ensure that each value used in a phi also has exactly one use. The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes. Thanks to the parallel copy at the end of the
 * predecessor block, the sources of phi nodes are the only use of that
 * value. This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all. We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist. We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
 */
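/* For illustration only (pseudo-NIR, hypothetical value numbers):
 *
 *    block_0: ...   block_1: ...
 *    block_2: ssa_2 = phi(block_0: ssa_0, block_1: ssa_1)
 *             use(ssa_2)
 *
 * becomes
 *
 *    block_0: ssa_3 = pcopy(ssa_0)   <- at the end of block_0
 *    block_1: ssa_4 = pcopy(ssa_1)   <- at the end of block_1
 *    block_2: ssa_2 = phi(block_0: ssa_3, block_1: ssa_4)
 *             ssa_5 = pcopy(ssa_2)   <- after the last phi
 *             use(ssa_5)
 *
 * Now ssa_2, ssa_3, and ssa_4 can all share one register since the
 * parallel copies are their only definitions and uses.
 */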
static bool
isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
   nir_instr *last_phi_instr = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi_instr = instr;
   }

   /* If we don't have any phis, then there's nothing for us to do. */
   if (last_phi_instr == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(dead_ctx);
   nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);

   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);
      nir_foreach_phi_src(src, phi) {
         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);
         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                           phi->dest.ssa.num_components,
                           phi->dest.ssa.bit_size, src->src.ssa->name);
         exec_list_push_tail(&pcopy->entries, &entry->node);

         assert(src->src.is_ssa);
         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);

         nir_instr_rewrite_src(&phi->instr, &src->src,
                               nir_src_for_ssa(&entry->dest.ssa));
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&entry->dest.ssa));

      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                            nir_src_for_ssa(&phi->dest.ssa));
   }

   return true;
}

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      if (!entry->src.is_ssa)
         continue;

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      /* Don't try to coalesce sources and destinations with different
       * numbers of components.
       */
      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);

      if (src_node->set == dest_node->set)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_register *
create_reg_for_ssa_def(nir_ssa_def *def, nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);

   reg->name = def->name;
   reg->num_components = def->num_components;
   reg->bit_size = def->bit_size;
   reg->num_array_elems = 0;

   return reg;
}

static bool
rewrite_ssa_def(nir_ssa_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;
   nir_register *reg;

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web. Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one. Note that all of
       * the defs in the merge set should have the same number of
       * components and bit size, so it doesn't matter which node's
       * definition we use.
       */
      if (node->set->reg == NULL)
         node->set->reg = create_reg_for_ssa_def(def, state->impl);

      reg = node->set->reg;
   } else {
      if (state->phi_webs_only)
         return true;

      /* We leave load_const SSA values alone. They act as immediates to
       * the backend. If one got coalesced into a phi, that's ok.
       */
      if (def->parent_instr->type == nir_instr_type_load_const)
         return true;

      reg = create_reg_for_ssa_def(def, state->impl);
   }

   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
   assert(list_empty(&def->uses) && list_empty(&def->if_uses));

   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just
       * got rid of all its uses.
       */
      nir_instr *parent_instr = def->parent_instr;
      nir_instr_remove(parent_instr);
      ralloc_steal(state->dead_ctx, parent_instr);
      return true;
   }

   assert(def->parent_instr->type != nir_instr_type_load_const);

   /* At this point we know a priori that this SSA def is part of a
    * nir_dest. We can use exec_node_data to get the dest pointer.
    */
   nir_dest *dest = exec_node_data(nir_dest, def, ssa);

   nir_instr_rewrite_dest(state->instr, dest, nir_dest_for_reg(reg));

   return true;
}

/* Resolves SSA definitions to registers. While we're at it, we also
 * remove phi nodes.
 */
static bool
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      state->instr = instr;
      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);

      if (instr->type == nir_instr_type_phi) {
         nir_instr_remove(instr);
         ralloc_steal(state->dead_ctx, instr);
      }
   }
   state->instr = NULL;

   return true;
}

static void
emit_copy(nir_parallel_copy_instr *pcopy, nir_src src, nir_src dest_src,
          void *mem_ctx)
{
   assert(!dest_src.is_ssa &&
          dest_src.reg.indirect == NULL &&
          dest_src.reg.base_offset == 0);

   if (src.is_ssa)
      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
   else
      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);

   nir_alu_instr *mov = nir_alu_instr_create(mem_ctx, nir_op_imov);
   nir_src_copy(&mov->src[0].src, &src, mov);
   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;

   nir_instr_insert_before(&pcopy->instr, &mov->instr);
}

/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive. We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Update the source value's location to the newly filled destination
 *  - Mark the destination as no longer needing to be copied
 *  - If the old location of the source is itself a destination, it is now
 *    empty and ready to be filled
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary. Now, the cycle is broken, so we
 * can continue with the above steps.
 */
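/* A worked example (illustrative register names): the parallel copy
 *
 *    r1 = r2; r2 = r1
 *
 * has no "empty" destination since r1 and r2 are each both a source and a
 * destination, so we break the cycle with a temporary:
 *
 *    copy_temp = r1; r1 = r2; r2 = copy_temp
 *
 * By contrast, "r3 = r1; r1 = r2" needs no temporary: r3 is empty, so we
 * emit "r3 = r1" first, which frees r1 to receive r2.
 */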
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(nir_src, values, num_copies * 2);

   /* The current location of a given piece of data. We will use -1 for
    * "null".
    */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from.
    * We will use -1 for "null".
    */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], entry->src))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = entry->src;
      }

      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], dest_src)) {
            /* Each destination of a parallel copy instruction should be
             * unique. A destination may get used as a source, so we still
             * have to walk the list. However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_src;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying. We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (to_do_idx >= 0) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         emit_copy(pcopy, values[loc[a]], values[b], state->mem_ctx);

         /* If any other copies want a, they can find it at b */
         loc[a] = b;

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* If a needs to be filled, it's ready for copying now */
         if (pred[a] != -1)
            ready[++ready_idx] = a;
      }
      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do. We have to break a cycle, so we create a new temporary
       * register for that purpose. Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers. However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with. If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       */
      assert(num_vals < num_copies * 2);
      nir_register *reg = nir_local_reg_create(state->impl);
      reg->name = "copy_temp";
      reg->num_array_elems = 0;
      if (values[b].is_ssa)
         reg->num_components = values[b].ssa->num_components;
      else
         reg->num_components = values[b].reg.reg->num_components;
      values[num_vals].is_ssa = false;
      values[num_vals].reg.reg = reg;

      emit_copy(pcopy, values[b], values[num_vals], state->mem_ctx);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
}

/* Resolves the parallel copies in a block. Each block can have at most
 * two: one at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes. If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   if (first_instr->type == nir_instr_type_parallel_copy) {
      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);

      resolve_parallel_copy(pcopy, state);
   }

   /* It's possible that the above code already cleaned up the end parallel
    * copy. However, doing so removed it from the instruction list so we
    * won't find it here. Therefore, it's safe to go ahead and just look
    * for one and clean it up if it exists.
    */
   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);
   if (end_pcopy)
      resolve_parallel_copy(end_pcopy, state);

   return true;
}

static void
nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
{
   struct from_ssa_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.dead_ctx = ralloc_context(NULL);
   state.impl = impl;
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_metadata_require(impl, nir_metadata_live_ssa_defs |
                              nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_registers_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   /* Clean up dead instructions and the hash table */
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
}

void
nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_convert_from_ssa_impl(function->impl, phi_webs_only);
   }
}


static void
place_phi_read(nir_shader *shader, nir_register *reg,
               nir_ssa_def *def, nir_block *block)
{
   if (block != def->parent_instr->block) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      struct set_entry *entry;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors) {
         /* All predecessors of this block have exactly one successor and
          * it is this block so they must eventually lead here without
          * intersecting each other. Place the reads in the predecessors
          * instead of this block.
          */
         set_foreach(block->predecessors, entry)
            place_phi_read(shader, reg, def, (nir_block *)entry->key);
         return;
      }
   }

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = (1 << reg->num_components) - 1;
   nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
}

/** Lower all of the phi nodes in a block to imovs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on
 * a single block to convert all of its phis to a register and some imovs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set
 * of phis after you call regs_to_ssa and do some copy propagation.
 *
 * The one intelligent thing this pass does is that it places the moves
 * from the phi sources as high up the predecessor tree as possible instead
 * of in the exact predecessor. This means that, in particular, it will
 * crawl into the deepest nesting of any if-ladders. In order to ensure
 * that doing so is safe, it stops as soon as one of the predecessors has
 * multiple successors.
 */
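/* For illustration only (pseudo-NIR, hypothetical value numbers):
 *
 *    block_2: ssa_2 = phi(block_0: ssa_0, block_1: ssa_1)
 *
 * becomes
 *
 *    block_0: r0 = imov ssa_0     <- via place_phi_read, possibly higher up
 *    block_1: r0 = imov ssa_1
 *    block_2: ssa_3 = imov r0     <- all former uses of ssa_2 now use ssa_3
 */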
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);

      nir_register *reg = create_reg_for_ssa_def(&phi->dest.ssa, impl);

      nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
      mov->src[0].src = nir_src_for_reg(reg);
      mov->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
      nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      nir_instr_insert(nir_after_instr(&phi->instr), &mov->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&mov->dest.dest.ssa));

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         place_phi_read(shader, reg, src->src.ssa, src->pred);
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
dest_replace_ssa_with_reg(nir_dest *dest, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;

   if (!dest->is_ssa)
      return true;

   nir_register *reg = create_reg_for_ssa_def(&dest->ssa, state->impl);

   nir_ssa_def_rewrite_uses(&dest->ssa, nir_src_for_reg(reg));

   nir_instr *instr = dest->ssa.parent_instr;
   *dest = nir_dest_for_reg(reg);
   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &reg->defs);

   state->progress = true;

   return true;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the
 * SSA defs in the given block with registers. If not used carefully, this
 * may result in phi nodes with register sources, which is technically
 * invalid NIR. Fortunately, the register-based into-SSA pass handles them
 * anyway.
 */
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_ssa_undef) {
         /* Undefs are just a read of something never written. */
         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
         nir_register *reg = create_reg_for_ssa_def(&undef->def, state.impl);
         nir_ssa_def_rewrite_uses(&undef->def, nir_src_for_reg(reg));
      } else if (instr->type == nir_instr_type_load_const) {
         /* Constant loads are SSA-only, so we need to insert a move */
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_register *reg = create_reg_for_ssa_def(&load->def, state.impl);
         nir_ssa_def_rewrite_uses(&load->def, nir_src_for_reg(reg));

         nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
         mov->src[0].src = nir_src_for_ssa(&load->def);
         mov->dest.dest = nir_dest_for_reg(reg);
         mov->dest.write_mask = (1 << reg->num_components) - 1;
         nir_instr_insert(nir_after_instr(&load->instr), &mov->instr);
      } else {
         nir_foreach_dest(instr, dest_replace_ssa_with_reg, &state);
      }
   }

   return state.progress;
}