/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "nir.h"

/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and
 * verify.
 */
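
/*
 * At a high level the pass works in four steps:
 *
 *  1. Pin the instructions that are not safe to move (control flow,
 *     derivatives, non-reorderable intrinsics, phis, etc.) and pull every
 *     other instruction out of its block onto a side list.
 *  2. Schedule early: push each unpinned instruction up the dominance
 *     tree to the highest block its sources allow.
 *  3. Schedule late: compute the LCA of each instruction's uses, then
 *     choose the least-deeply-nested block between the early and late
 *     positions.
 *  4. Place: re-insert every instruction into its chosen block in a
 *     valid order.
 */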
struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};
/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =            (1 << 0),
   GCM_INSTR_SCHEDULED_EARLY =   (1 << 1),
   GCM_INSTR_SCHEDULED_LATE =    (1 << 2),
   GCM_INSTR_PLACED =            (1 << 3),
};
struct gcm_state {
   nir_function_impl *impl;

   /* The instruction currently being scheduled; used to communicate with
    * the per-source callback during the early-scheduling walk.
    */
   nir_instr *instr;

   /* The list of non-pinned instructions.  As we do the late scheduling,
    * we pull non-pinned instructions out of their blocks and place them in
    * this list.  This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   /* Per-block information, indexed by block->index */
   struct gcm_block_info *blocks;
};
/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}
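
/* For example, given the CFG
 *
 *    block A
 *    loop {
 *       block B
 *       loop { block C }
 *    }
 *
 * gcm_build_block_info records loop_depth 0 for A, 1 for B, and 2 for C.
 * An if statement does not add to the depth of the blocks in its branches.
 */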
/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED or 0.
 */
static bool
gcm_pin_instructions_block(nir_block *block, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_alu:
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fddx:
         case nir_op_fddy:
         case nir_op_fddx_fine:
         case nir_op_fddy_fine:
         case nir_op_fddx_coarse:
         case nir_op_fddy_coarse:
            /* These can only go in uniform control flow; pin them for now */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_tex:
         switch (nir_instr_as_tex(instr)->op) {
         case nir_texop_tex:
         case nir_texop_txb:
         case nir_texop_lod:
            /* These take implicit derivatives so they need to be pinned */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_load_const:
         instr->pass_flags = 0;
         break;

      case nir_instr_type_intrinsic: {
         const nir_intrinsic_info *info =
            &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];

         if ((info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER)) {
            instr->pass_flags = 0;
         } else {
            instr->pass_flags = GCM_INSTR_PINNED;
         }
         break;
      }

      case nir_instr_type_jump:
      case nir_instr_type_ssa_undef:
      case nir_instr_type_phi:
         instr->pass_flags = GCM_INSTR_PINNED;
         break;

      default:
         unreachable("Invalid instruction type in GCM");
      }

      if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
         /* If this is an unpinned instruction, go ahead and pull it out of
          * the program and put it on the instrs list.  This has a couple
          * of benefits.  First, it makes the scheduling algorithm more
          * efficient because we can avoid walking over basic blocks and
          * pinned instructions.  Second, it keeps us from causing linked
          * list confusion when we're trying to put everything in its
          * proper place at the end of the pass.
          *
          * Note that we don't use nir_instr_remove here because that also
          * cleans up uses and defs and we want to keep that information.
          */
         exec_node_remove(&instr->node);
         exec_list_push_tail(&state->instrs, &instr->node);
      }
   }

   return true;
}
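
/* A note on the intrinsic case above: an intrinsic is movable only when it
 * is flagged both CAN_ELIMINATE and CAN_REORDER (a uniform load, for
 * example); anything with side effects, such as a store or an atomic,
 * fails that test and therefore stays pinned.
 */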
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);
/** Update an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);
   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can go ahead and just compare indices.
    */
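   /* For example, if one source's defining instruction sits in block 1 and
    * another's in block 4, with block 1 dominating block 4, the comparison
    * below leaves this instruction in block 4: the deepest block that is
    * still dominated by all of its sources.
    */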
   if (instr->block->index < src->ssa->parent_instr->block->index)
      instr->block = src->ssa->parent_instr->block;
   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  We may
    * still be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}
/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating their instr->block field.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following the
    * sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   instr->block = state->impl->start_block;
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);
/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   struct set_entry *entry;
   set_foreach(def->uses, entry) {
      nir_instr *use_instr = (nir_instr *)entry->key;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi, phi_src) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   set_foreach(def->if_uses, entry) {
      nir_if *if_stmt = (nir_if *)entry->key;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   /* Some instructions may never be used.  We'll just leave them scheduled
    * early and let dead code clean them up.
    */
   if (lca == NULL)
      return true;

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best = lca;
   while (lca != def->parent_instr->block) {
      assert(lca);
      if (state->blocks[lca->index].loop_depth <
          state->blocks[best->index].loop_depth)
         best = lca;
      lca = lca->imm_dom;
   }

   def->parent_instr->block = best;

   return true;
}
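
/* A worked example of the hoisting loop above: suppose scheduling early
 * placed an instruction in block E (loop_depth 0) and the LCA of its uses
 * is block L (loop_depth 1), with E dominating L.  Walking the dominator
 * tree from L up to E, the first block encountered with loop_depth 0
 * becomes "best", so the value is computed once outside the loop even
 * though all of its uses are inside it.
 */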
/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction
 * and then possibly schedules it earlier than that.  The actual location
 * is as far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);
static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   struct set_entry *entry;
   set_foreach(def->uses, entry)
      gcm_place_instr((nir_instr *)entry->key, state);

   return false;
}
/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block, we insert it before the last
 * instruction inserted in that block rather than the last instruction
 * inserted overall.
 */
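/* For example, if instructions A and B both map to block X and B uses A,
 * the post-order walk places B first (at the bottom of X) and then places
 * A immediately before B via last_instr, so a def always lands above its
 * uses within the block.
 */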
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   /* Phi nodes are our one source of back-edges.  Since right now we are
    * only doing scheduling within blocks, we don't need to worry about
    * them since they are always at the top.  Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}
static void
opt_gcm_impl(nir_function_impl *impl)
{
   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   gcm_build_block_info(&impl->body, &state, 0);
   nir_foreach_block(impl, gcm_pin_instructions_block, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_pred, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
}
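
/* Entry point: runs GCM on every function implementation in the shader.
 *
 * As the Click paper cited at the top suggests, GCM pairs naturally with a
 * value-numbering/CSE pass; a typical (illustrative) ordering would be to
 * run nir_opt_cse(shader) first so that merged computations can then be
 * moved to their best blocks by nir_opt_gcm(shader).  The exact pass
 * ordering is the driver's choice.
 */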
void
nir_opt_gcm(nir_shader *shader)
{
   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         opt_gcm_impl(overload->impl);
   }
}