/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion; Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and
 * verify.
 */
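
/* At a high level, opt_gcm_impl() below runs the pass in phases:
 * gcm_pin_instructions() marks the instructions that are not allowed to
 * move, an optional value-numbering step deduplicates the movable ones,
 * gcm_schedule_early_instr() finds the earliest block each instruction can
 * legally live in, gcm_schedule_late_instr() finds the latest useful block
 * while trying to stay out of loops, and gcm_place_instr() finally
 * re-inserts every instruction into its chosen block.
 */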

struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

struct gcm_instr_info {
   nir_block *early_block;
};

/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =                (1 << 0),
   GCM_INSTR_SCHEDULE_EARLIER_ONLY = (1 << 1),
   GCM_INSTR_SCHEDULED_EARLY =       (1 << 2),
   GCM_INSTR_SCHEDULED_LATE =        (1 << 3),
   GCM_INSTR_PLACED =                (1 << 4),
};
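
/* Note: pass_flags starts out as either GCM_INSTR_PINNED or 0 (see
 * gcm_pin_instructions() below); the SCHEDULED_EARLY, SCHEDULED_LATE and
 * PLACED bits are OR'd in as each phase visits an instruction, which is
 * what lets the recursive walks terminate on already-visited instructions.
 */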

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   bool progress;

   /* The list of non-pinned instructions.  As we do the late scheduling,
    * we pull non-pinned instructions out of their blocks and place them in
    * this list.  This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;

   unsigned num_instrs;
   struct gcm_instr_info *instr_infos;
};

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}

static bool
is_src_scalarizable(nir_src *src)
{
   nir_instr *src_instr = src->ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return deref->mode == nir_var_shader_in ||
                deref->mode == nir_var_uniform ||
                deref->mode == nir_var_mem_ubo ||
                deref->mode == nir_var_mem_ssbo ||
                deref->mode == nir_var_mem_global;
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;

      default:
         break;
      }

      return false;
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED or 0.
 */
static void
gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
{
   state->num_instrs = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* Index the instructions for use in gcm_state::instrs */
         instr->index = state->num_instrs++;

         switch (instr->type) {
         case nir_instr_type_alu:
            switch (nir_instr_as_alu(instr)->op) {
            case nir_op_fddx_fine:
            case nir_op_fddy_fine:
            case nir_op_fddx_coarse:
            case nir_op_fddy_coarse:
               /* These can only go in uniform control flow */
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
               break;

            case nir_op_mov:
               if (!is_src_scalarizable(&(nir_instr_as_alu(instr)->src[0].src))) {
                  instr->pass_flags = GCM_INSTR_PINNED;
                  break;
               }
               /* fallthrough */

            default:
               instr->pass_flags = 0;
               break;
            }
            break;

         case nir_instr_type_tex:
            if (nir_tex_instr_has_implicit_derivative(nir_instr_as_tex(instr)))
               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
            break;

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
            instr->pass_flags = 0;
            break;

         case nir_instr_type_intrinsic: {
            if (nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr))) {
               instr->pass_flags = 0;
            } else {
               instr->pass_flags = GCM_INSTR_PINNED;
            }
            break;
         }

         case nir_instr_type_jump:
         case nir_instr_type_ssa_undef:
         case nir_instr_type_phi:
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            unreachable("Invalid instruction type in GCM");
         }

         if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
            /* If this is an unpinned instruction, go ahead and pull it out of
             * the program and put it on the instrs list.  This has a couple
             * of benefits.  First, it makes the scheduling algorithm more
             * efficient because we can avoid walking over basic blocks and
             * pinned instructions.  Second, it keeps us from causing linked
             * list confusion when we're trying to put everything in its
             * proper place at the end of the pass.
             *
             * Note that we don't use nir_instr_remove here because that also
             * cleans up uses and defs and we want to keep that information.
             */
            exec_node_remove(&instr->node);
            exec_list_push_tail(&state->instrs, &instr->node);
         }
      }
   }
}

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Update an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just compare indices.
    */
   struct gcm_instr_info *src_info =
      &state->instr_infos[src->ssa->parent_instr->index];
   struct gcm_instr_info *info = &state->instr_infos[instr->index];
   if (info->early_block->index < src_info->early_block->index)
      info->early_block = src_info->early_block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  We may
    * still be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating the early_block field of
 * the corresponding gcm_instr_info entry.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions always get scheduled in their original block so we
    * don't need to do anything.  Also, bailing here keeps us from ever
    * following the sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED) {
      state->instr_infos[instr->index].early_block = instr->block;
      return;
   }

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   state->instr_infos[instr->index].early_block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}

static nir_block *
gcm_choose_block_for_instr(nir_instr *instr, nir_block *early_block,
                           nir_block *late_block, struct gcm_state *state)
{
   assert(nir_block_dominates(early_block, late_block));

   nir_block *best = late_block;
   for (nir_block *block = late_block; block != NULL; block = block->imm_dom) {
      /* Being too aggressive with how we pull instructions out of loops can
       * result in extra register pressure and spilling.  For example, it's
       * fairly common for loops in compute shaders to calculate SSBO offsets
       * from the workgroup id, subgroup id and subgroup invocation; pulling
       * all of these calculations outside the loop increases register
       * pressure.
       *
       * To work around these issues, for now we only allow constant and
       * texture instructions to be moved outside their original loops.
       *
       * TODO: figure out some heuristics to allow more to be moved out of
       * loops.
       */
      if (state->blocks[block->index].loop_depth <
             state->blocks[best->index].loop_depth &&
          (nir_block_dominates(instr->block, block) ||
           instr->type == nir_instr_type_load_const ||
           instr->type == nir_instr_type_tex))
         best = block;
      else if (block == instr->block)
         best = block;

      if (block == early_block)
         break;
   }

   return best;
}

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the sources of the phi nodes that use them; instead, they
       * have to dominate the predecessor block corresponding to the phi
       * source.  We handle this by looking through the sources, finding
       * any that are using this SSA def, and using those blocks instead
       * of the one the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   nir_block *early_block =
      state->instr_infos[def->parent_instr->index].early_block;

   /* Some instructions may never be used.  Flag them and the instruction
    * placement code will get rid of them for us.
    */
   if (lca == NULL) {
      def->parent_instr->block = NULL;
      return true;
   }

   if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
       lca != def->parent_instr->block &&
       nir_block_dominates(def->parent_instr->block, lca)) {
      lca = def->parent_instr->block;
   }

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best_block =
      gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);

   if (def->parent_instr->block != best_block)
      state->progress = true;

   def->parent_instr->block = best_block;

   return true;
}
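
/* A small illustration of the LCA rule above: if a def is used once in each
 * arm of an if statement, the dominance LCA of the two use blocks is the
 * block ending in the if condition, so that block becomes the late schedule
 * that gcm_choose_block_for_instr() then weighs against loop depth.
 */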

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that.  The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}

static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);

static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   nir_foreach_use(use_src, def)
      gcm_place_instr(use_src->parent_instr, state);

   return true;
}

static bool
gcm_replace_def_with_undef(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   if (list_is_empty(&def->uses) && list_is_empty(&def->if_uses))
      return true;

   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(state->impl->function->shader,
                                 def->num_components, def->bit_size);
   nir_instr_insert(nir_before_cf_list(&state->impl->body), &undef->instr);
   nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(&undef->def));

   return true;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block, we insert it before the last
 * instruction inserted in that block rather than the last instruction in
 * the block.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   if (instr->block == NULL) {
      nir_foreach_ssa_def(instr, gcm_replace_def_with_undef, state);
      nir_instr_remove(instr);
      return;
   }

   /* Phi nodes are our one source of back-edges.  Since right now we are
    * only doing scheduling within blocks, we don't need to worry about
    * them since they are always at the top.  Just skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}

static bool
opt_gcm_impl(nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   struct gcm_state state;

   state.impl = impl;
   state.progress = false;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, 0);

   gcm_pin_instructions(impl, &state);

   state.instr_infos =
      rzalloc_array(NULL, struct gcm_instr_info, state.num_instrs);

   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (nir_instr_set_add_or_rewrite(gvn_set, instr)) {
            nir_instr_remove(instr);
            state.progress = true;
         }
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);
   ralloc_free(state.instr_infos);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return state.progress;
}

bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(function->impl, value_number);
   }

   return progress;
}
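
/* Example usage (hypothetical caller, not part of this file): a driver would
 * typically run GCM with value numbering inside its optimization loop and
 * follow it with dead-code elimination, e.g.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_gcm(shader, true);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */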