/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "nir.h"

/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and to
 * verify.
 */
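/*
 * The pass runs in four phases, mirroring the functions below:
 *
 *  1. Pin instructions that must not move out of their basic block
 *     (gcm_pin_instructions_block).
 *  2. Schedule instructions early, as high up the dominance tree as their
 *     sources allow (gcm_schedule_early_*).
 *  3. Schedule instructions late, at the LCA of their uses, preferring
 *     blocks with a smaller loop depth (gcm_schedule_late_*).
 *  4. Place the scheduled instructions back into their chosen blocks in a
 *     legal order (gcm_place_instr).
 *
 * Illustrative example: a value computed inside a loop from loop-invariant
 * inputs is scheduled early to a block before the loop; even if all of its
 * uses are inside the loop, the loop-depth walk in gcm_schedule_late_def
 * settles on the pre-loop block, so the value is hoisted out of the loop
 * and computed only once.
 */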
struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

struct gcm_state {
   nir_function_impl *impl;

   /* The instruction whose sources are currently being walked */
   nir_instr *instr;

   /* Marks all instructions that have been visited by the current pass */
   BITSET_WORD *visited;

   /* Marks instructions that are "pinned", i.e. cannot be moved from their
    * basic block by code motion */
   BITSET_WORD *pinned;

   /* The list of non-pinned instructions.  As we do the late scheduling,
    * we pull non-pinned instructions out of their blocks and place them in
    * this list.  This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;
};
/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}
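/* Note: the loop depths recorded above are consumed by gcm_schedule_late_def
 * below, which prefers the block with the smallest loop depth when picking
 * an instruction's final home.
 */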
/* Walks the instruction list and marks immovable instructions as pinned */
static bool
gcm_pin_instructions_block(nir_block *block, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      bool pinned;
      switch (instr->type) {
      case nir_instr_type_alu:
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fddx:
         case nir_op_fddy:
         case nir_op_fddx_fine:
         case nir_op_fddy_fine:
         case nir_op_fddx_coarse:
         case nir_op_fddy_coarse:
            /* These can only go in uniform control flow; pin them for now */
            pinned = true;
            break;

         default:
            pinned = false;
         }
         break;

      case nir_instr_type_tex:
         /* We need to pin texture ops that do partial derivatives */
         pinned = nir_instr_as_tex(instr)->op == nir_texop_txd;
         break;

      case nir_instr_type_load_const:
         pinned = false;
         break;

      case nir_instr_type_intrinsic: {
         const nir_intrinsic_info *info =
            &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];
         pinned = !(info->flags & NIR_INTRINSIC_CAN_ELIMINATE) ||
                  !(info->flags & NIR_INTRINSIC_CAN_REORDER);
         break;
      }

      case nir_instr_type_jump:
      case nir_instr_type_ssa_undef:
      case nir_instr_type_phi:
         pinned = true;
         break;

      default:
         unreachable("Invalid instruction type in GCM");
      }

      if (pinned)
         BITSET_SET(state->pinned, instr->index);
   }

   return true;
}
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);
/** Update an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);
   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can go ahead and simply compare indices.
    */
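   /* Illustrative example: if this instruction is currently assigned to
    * the block with index 2 but one of its sources lives in the block
    * with index 5, the source's block is deeper in the dominance tree,
    * so the instruction is pushed down to block 5.
    */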
   if (instr->block->index < src->ssa->parent_instr->block->index)
      instr->block = src->ssa->parent_instr->block;
   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  Since we
    * may still be iterating through sources, future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}
/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating their instr->block field.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (BITSET_TEST(state->visited, instr->index))
      return;

   BITSET_SET(state->visited, instr->index);

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following the
    * sources of phi nodes which can be back-edges.
    */
   if (BITSET_TEST(state->pinned, instr->index))
      return;

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   instr->block = state->impl->start_block;
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}
static bool
gcm_schedule_early_block(nir_block *block, void *state)
{
   nir_foreach_instr(block, instr)
      gcm_schedule_early_instr(instr, state);

   return true;
}
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);
/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   struct set_entry *entry;
   set_foreach(def->uses, entry) {
      nir_instr *use_instr = (nir_instr *)entry->key;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi, phi_src) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }
   set_foreach(def->if_uses, entry) {
      nir_if *if_stmt = (nir_if *)entry->key;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }
   /* Some instructions may never be used.  We'll just leave them scheduled
    * early and let dead code clean them up.
    */
   if (lca == NULL)
      return true;

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
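   /* Illustrative example: a value whose sources are loop-invariant but
    * whose uses are all inside a loop has its LCA inside the loop; the
    * walk below then settles on the lowest block along the path to the
    * early block that lies outside the loop.
    */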
   nir_block *best = lca;
   while (lca != def->parent_instr->block) {
      assert(lca);
      if (state->blocks[lca->index].loop_depth <
          state->blocks[best->index].loop_depth)
         best = lca;
      lca = lca->imm_dom;
   }

   def->parent_instr->block = best;

   return true;
}
/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that.  The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (BITSET_TEST(state->visited, instr->index))
      return;

   BITSET_SET(state->visited, instr->index);

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (BITSET_TEST(state->pinned, instr->index))
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}
static bool
gcm_schedule_late_block(nir_block *block, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      gcm_schedule_late_instr(instr, state);

      if (!BITSET_TEST(state->pinned, instr->index)) {
         /* If this is an instruction we can move, go ahead and pull it out
          * of the program and put it on the instrs list.  This keeps us
          * from causing linked list confusion when we're trying to put
          * everything in its proper place.
          *
          * Note that we don't use nir_instr_remove here because that also
          * cleans up uses and defs and we want to keep that information.
          */
         exec_node_remove(&instr->node);
         exec_list_push_tail(&state->instrs, &instr->node);
      }
   }

   return true;
}
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);
static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   struct set_entry *entry;
   set_foreach(def->uses, entry)
      gcm_place_instr((nir_instr *)entry->key, state);

   return false;
}
/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block we insert it before the last
 * instruction inserted in that block rather than at the end of the block.
 * This guarantees that, within a block, every definition is placed before
 * its uses.
 */
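/* Illustrative example: if instructions A and B both end up in block 3 and
 * B uses A, the depth-first search places B first.  When A is placed later,
 * it is inserted before B (the last instruction inserted into block 3), so
 * the definition still ends up before its use.
 */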
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (BITSET_TEST(state->visited, instr->index))
      return;

   BITSET_SET(state->visited, instr->index);

   /* Phi nodes are our one source of back-edges.  Since right now we are
    * only doing scheduling within blocks, we don't need to worry about
    * them since they are always at the top.  Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(BITSET_TEST(state->pinned, instr->index));
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);
   if (BITSET_TEST(state->pinned, instr->index)) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (BITSET_TEST(state->pinned, after->index)) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }
   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!BITSET_TEST(state->pinned, instr->index)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}
static void
opt_gcm_impl(nir_function_impl *impl)
{
   struct gcm_state state;

   unsigned num_instrs = nir_index_instrs(impl);
   unsigned instr_words = BITSET_WORDS(num_instrs);

   state.impl = impl;
   state.instr = NULL;
   state.visited = rzalloc_array(NULL, BITSET_WORD, instr_words);
   state.pinned = rzalloc_array(NULL, BITSET_WORD, instr_words);
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   gcm_build_block_info(&impl->body, &state, 0);
   nir_foreach_block(impl, gcm_pin_instructions_block, &state);

   nir_foreach_block(impl, gcm_schedule_early_block, &state);

   memset(state.visited, 0, instr_words * sizeof(*state.visited));
   nir_foreach_block(impl, gcm_schedule_late_block, &state);

   memset(state.visited, 0, instr_words * sizeof(*state.visited));
   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_pred, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.visited);
   ralloc_free(state.pinned);
   ralloc_free(state.blocks);
}
void
nir_opt_gcm(nir_shader *shader)
{
   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         opt_gcm_impl(overload->impl);
   }
}