/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion. A description of GCM can be found in
 * "Global Code Motion / Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways. The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and to
 * verify for correctness.
 */

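/*
 * Informally, the pass hoists each unpinned instruction as far up the
 * dominance tree as its sources allow and then sinks it back down toward
 * its uses, preferring blocks with the smallest loop depth. In practice
 * this pulls loop-invariant computations out of loops and pushes
 * computations whose results feed only one side of an if statement into
 * that side.
 */
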
struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block. This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED = (1 << 0),
   GCM_INSTR_SCHEDULED_EARLY = (1 << 1),
   GCM_INSTR_SCHEDULED_LATE = (1 << 2),
   GCM_INSTR_PLACED = (1 << 3),
};

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   /* The list of non-pinned instructions. Before scheduling, we pull
    * non-pinned instructions out of their blocks and place them in this
    * list. This saves us from having linked-list problems when we go to
    * put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;
};

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}

/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED or 0.
 */
static bool
gcm_pin_instructions_block(nir_block *block, struct gcm_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fddx:
         case nir_op_fddy:
         case nir_op_fddx_fine:
         case nir_op_fddy_fine:
         case nir_op_fddx_coarse:
         case nir_op_fddy_coarse:
            /* These can only go in uniform control flow; pin them for now */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_tex:
         switch (nir_instr_as_tex(instr)->op) {
         case nir_texop_tex:
         case nir_texop_txb:
         case nir_texop_lod:
            /* These take implicit derivatives so they need to be pinned */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_load_const:
         instr->pass_flags = 0;
         break;

      case nir_instr_type_intrinsic: {
         const nir_intrinsic_info *info =
            &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];

         if ((info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER)) {
            instr->pass_flags = 0;
         } else {
            instr->pass_flags = GCM_INSTR_PINNED;
         }
         break;
      }

      case nir_instr_type_jump:
      case nir_instr_type_ssa_undef:
      case nir_instr_type_phi:
         instr->pass_flags = GCM_INSTR_PINNED;
         break;

      default:
         unreachable("Invalid instruction type in GCM");
      }

      if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
         /* If this is an unpinned instruction, go ahead and pull it out of
          * the program and put it on the instrs list. This has a couple
          * of benefits. First, it makes the scheduling algorithm more
          * efficient because we can avoid walking over basic blocks and
          * pinned instructions. Second, it keeps us from causing linked
          * list confusion when we're trying to put everything in its
          * proper place at the end of the pass.
          *
          * Note that we don't use nir_instr_remove here because that also
          * cleans up uses and defs and we want to keep that information.
          */
         exec_node_remove(&instr->node);
         exec_list_push_tail(&state->instrs, &instr->node);
      }
   }

   return true;
}

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Updates an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction. It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index. Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just go ahead and compare indices.
    */
   if (instr->block->index < src->ssa->parent_instr->block->index)
      instr->block = src->ssa->parent_instr->block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above. We may
    * still be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating their instr->block field.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything. Also, bailing here keeps us from ever following the
    * sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   /* Start with the instruction at the top. As we iterate over the
    * sources, it will get moved down as needed.
    */
   instr->block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses. It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special. SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source. We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   /* Some instructions may never be used. We'll just leave them scheduled
    * early and let dead code clean them up.
    */
   if (lca == NULL)
      return true;

   /* We now have the LCA of all of the uses. If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best = lca;
   while (lca != def->parent_instr->block) {
      assert(lca);
      if (state->blocks[lca->index].loop_depth <
          state->blocks[best->index].loop_depth)
         best = lca;
      lca = lca->imm_dom;
   }
   def->parent_instr->block = best;

   return true;
}

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree. The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies. Instead, it
 * first finds the latest possible place it can schedule the instruction
 * and then possibly schedules it earlier than that. The actual location
 * is as far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything. Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}

static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);

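/* Helper for nir_foreach_ssa_def that places every user of the given def.
 *
 * Visiting the users first ensures that, by the time gcm_place_instr()
 * inserts the defining instruction into its block, all of the instructions
 * that must come after it in that block have already been placed.
 */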
static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   nir_foreach_use(use_src, def)
      gcm_place_instr(use_src->parent_instr, state);

   return false;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone. This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm. We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction. Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array. When
 * we insert an instruction in a block we insert it before the last
 * instruction inserted in that block rather than the last instruction
 * inserted globally.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   /* Phi nodes are our one source of back-edges. Since right now we are
    * only doing scheduling within blocks, we don't need to worry about
    * them since they are always at the top. Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block. Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}

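/* Runs GCM on a single function implementation.
 *
 * The pass proceeds in phases: pin the immovable instructions and pull the
 * movable ones out of the CFG, optionally value-number them, schedule each
 * one early (as high in the dominance tree as its sources allow), schedule
 * it late (as low as its uses allow while staying out of loops), and
 * finally place everything back into its chosen block.
 */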
static bool
opt_gcm_impl(nir_function_impl *impl, bool value_number)
{
   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   gcm_build_block_info(&impl->body, &state, 0);

   nir_foreach_block(block, impl) {
      gcm_pin_instructions_block(block, &state);
   }

   bool progress = false;
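   /* Optionally run global value numbering first. Combining equivalent
    * instructions before scheduling means GCM only has to pick a block for
    * a single copy of each value.
    */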
   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (nir_instr_set_add_or_rewrite(gvn_set, instr)) {
            nir_instr_remove(instr);
            progress = true;
         }
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

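   /* Put everything back into the CFG. We pull from the tail of the list;
    * gcm_place_instr() places all users of an instruction's defs before the
    * instruction itself and removes each placed instruction from
    * state.instrs, so this loop terminates once every instruction has been
    * inserted into its chosen block.
    */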
   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

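/* Toplevel entry point. Runs GCM on every function in the shader; passing
 * value_number = true additionally performs global value numbering on the
 * unpinned instructions before they are scheduled.
 */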
bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(function->impl, value_number);
   }

   return progress;
}