/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "nir.h"
#include "nir_instr_set.h"
/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and to
 * verify.
 */
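
/* In broad strokes, the pass below works in four phases:
 *
 *  1. Pin the instructions that cannot move and pull every movable
 *     instruction onto a side list (gcm_pin_instructions).
 *  2. Schedule each instruction as early as its sources allow
 *     (gcm_schedule_early_instr).
 *  3. Schedule each instruction as late as its uses allow, then hoist it
 *     to the least loop-nested block on the path between the two
 *     (gcm_schedule_late_instr).
 *  4. Put the instructions back into their chosen blocks in a valid order
 *     (gcm_place_instr).
 */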
struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};
struct gcm_instr_info {
   /* The earliest block this instruction could legally be scheduled in */
   nir_block *early_block;
};
/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =          (1 << 0),
   GCM_INSTR_SCHEDULED_EARLY = (1 << 1),
   GCM_INSTR_SCHEDULED_LATE =  (1 << 2),
   GCM_INSTR_PLACED =          (1 << 3),
};
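
/* For a movable instruction, these flags accumulate as the pass runs: its
 * pass_flags start at 0 and gain GCM_INSTR_SCHEDULED_EARLY, then
 * GCM_INSTR_SCHEDULED_LATE, and finally GCM_INSTR_PLACED.  Pinned
 * instructions keep GCM_INSTR_PINNED set throughout and collect the same
 * scheduling flags as they are visited.
 */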
struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   /* The list of non-pinned instructions.  As we pin instructions, we pull
    * the non-pinned ones out of their blocks and place them on this list.
    * This saves us from having linked-list problems when we go to put
    * instructions back in their blocks.
    */
   struct exec_list instrs;

   unsigned num_instrs;

   struct gcm_block_info *blocks;
   struct gcm_instr_info *instr_infos;
};
/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}
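
/* As a small worked example, for a CFG shaped like
 *
 *    block_0              (loop_depth 0)
 *    loop {
 *       block_1           (loop_depth 1)
 *       loop {
 *          block_2        (loop_depth 2)
 *       }
 *       block_3           (loop_depth 1)
 *    }
 *    block_4              (loop_depth 0)
 *
 * the walk above assigns the depths shown on the right.  The late
 * scheduler uses these to prefer block_0 or block_4 over any block inside
 * the loops.
 */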
/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED or 0.
 */
static void
gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
{
   state->num_instrs = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Index the instructions for use in gcm_state::instrs */
         instr->index = state->num_instrs++;

         switch (instr->type) {
         case nir_instr_type_alu:
            switch (nir_instr_as_alu(instr)->op) {
            case nir_op_fddx:
            case nir_op_fddy:
            case nir_op_fddx_fine:
            case nir_op_fddy_fine:
            case nir_op_fddx_coarse:
            case nir_op_fddy_coarse:
               /* These can only go in uniform control flow; pin them for now */
               instr->pass_flags = GCM_INSTR_PINNED;
               break;

            default:
               instr->pass_flags = 0;
               break;
            }
            break;

         case nir_instr_type_tex:
            if (nir_tex_instr_has_implicit_derivative(nir_instr_as_tex(instr)))
               instr->pass_flags = GCM_INSTR_PINNED;
            else
               instr->pass_flags = 0;
            break;

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
            instr->pass_flags = 0;
            break;

         case nir_instr_type_intrinsic: {
            if (nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr))) {
               instr->pass_flags = 0;
            } else {
               instr->pass_flags = GCM_INSTR_PINNED;
            }
            break;
         }

         case nir_instr_type_jump:
         case nir_instr_type_ssa_undef:
         case nir_instr_type_phi:
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            unreachable("Invalid instruction type in GCM");
         }

         if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
            /* If this is an unpinned instruction, go ahead and pull it out of
             * the program and put it on the instrs list.  This has a couple
             * of benefits.  First, it makes the scheduling algorithm more
             * efficient because we can avoid walking over basic blocks and
             * pinned instructions.  Second, it keeps us from causing linked
             * list confusion when we're trying to put everything in its
             * proper place at the end of the pass.
             *
             * Note that we don't use nir_instr_remove here because that also
             * cleans up uses and defs and we want to keep that information.
             */
            exec_node_remove(&instr->node);
            exec_list_push_tail(&state->instrs, &instr->node);
         }
      }
   }
}
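
/* After pinning, a block might look like this (hypothetical example):
 *
 *    vec1 32 ssa_0 = load_const (0x3f800000)   -> moved to state->instrs
 *    vec1 32 ssa_1 = fddx ssa_2                -> pinned, stays in place
 *    vec1 32 ssa_3 = fadd ssa_1, ssa_0         -> moved to state->instrs
 *
 * Only the pinned instructions remain in the block lists; everything else
 * now lives on state->instrs awaiting scheduling.
 */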
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);
/** Updates an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);
   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can simply compare indices.
    */
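   /* For instance, if the block indices along that branch are 0 -> 2 -> 5,
    * a source scheduled in block 5 pushes this instruction's early block
    * down from block 2 to block 5, while a source in block 0 leaves it
    * alone.
    */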
   struct gcm_instr_info *src_info =
      &state->instr_infos[src->ssa->parent_instr->index];
   struct gcm_instr_info *info = &state->instr_infos[instr->index];
   if (info->early_block->index < src_info->early_block->index)
      info->early_block = src_info->early_block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above, and we may
    * still be iterating through sources; future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}
/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating the early_block field of
 * the corresponding gcm_instr_info entry.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions always get scheduled in their original block so we
    * don't need to do anything.  Also, bailing here keeps us from ever
    * following the sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED) {
      state->instr_infos[instr->index].early_block = instr->block;
      return;
   }

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   state->instr_infos[instr->index].early_block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);
/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   nir_block *early_block =
      state->instr_infos[def->parent_instr->index].early_block;

   /* Some instructions may never be used.  We'll just schedule them early
    * and let dead code clean them up.
    */
   if (lca == NULL) {
      def->parent_instr->block = early_block;
      return true;
   }

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   assert(nir_block_dominates(early_block, lca));
   nir_block *best = lca;
   for (nir_block *block = lca; block != NULL; block = block->imm_dom) {
      if (state->blocks[block->index].loop_depth <
          state->blocks[best->index].loop_depth)
         best = block;

      if (block == early_block)
         break;
   }

   def->parent_instr->block = best;

   return true;
}
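
/* For example (hypothetical), if every use of a value sits inside a loop
 * body but all of its sources are defined before the loop, the LCA lies
 * inside the loop while early_block lies outside it.  The walk above then
 * picks the deepest block on the dominance path that is outside the loop,
 * effectively performing loop-invariant code motion.
 */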
/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule instructions "as late as possible" as the paper implies.
 * Instead, it first finds the latest possible place it can schedule the
 * instruction and then possibly schedules it earlier than that.  The
 * actual location is as far down the tree as we can go while trying to
 * stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);
static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   nir_foreach_use(use_src, def)
      gcm_place_instr(use_src->parent_instr, state);

   return true;
}
/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block we insert it before the last
 * instruction inserted in that block rather than the last instruction
 * in that block.  This ensures that each definition ends up ahead of
 * everything that (transitively) uses it.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   /* Phi nodes are our one source of back-edges.  Since, right now, we are
    * only doing scheduling within blocks, we don't need to worry about
    * them; they are always at the top.  Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}
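
/* As a sketch of why inserting before last_instr is correct: suppose b and
 * c both land in the same block and c uses b.  The post-order walk places
 * c first (at the end of the block) and records it as last_instr; when b
 * is placed afterwards, it is inserted before c, so the definition still
 * precedes its use.
 */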
static bool
opt_gcm_impl(nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, 0);

   gcm_pin_instructions(impl, &state);

   state.instr_infos =
      rzalloc_array(NULL, struct gcm_instr_info, state.num_instrs);

   bool progress = false;
   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (nir_instr_set_add_or_rewrite(gvn_set, instr)) {
            nir_instr_remove(instr);
            progress = true;
         }
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
   ralloc_free(state.instr_infos);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(function->impl, value_number);
   }

   return progress;
}
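
/* A minimal sketch of how a caller might invoke this pass (hypothetical
 * pass ordering; nir_opt_dce is a real pass, but pairing it with GCM and
 * the loop structure below are the caller's choice):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_gcm(shader, true);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 *
 * Running DCE afterwards cleans up the unused instructions that GCM
 * deliberately schedules early rather than deleting.
 */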