2 * Copyright © 2018 Red Hat
3 * Copyright © 2019 Valve Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * Rob Clark <robdclark@gmail.com>
26 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
27 * Rhys Perry (pendingchaos02@gmail.com)
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
40 nir_can_move_instr(nir_instr
*instr
, nir_move_options options
)
42 if ((options
& nir_move_const_undef
) && instr
->type
== nir_instr_type_load_const
) {
46 if (instr
->type
== nir_instr_type_intrinsic
) {
47 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
48 if ((options
& nir_move_load_ubo
) && intrin
->intrinsic
== nir_intrinsic_load_ubo
)
51 if ((options
& nir_move_load_input
) &&
52 (intrin
->intrinsic
== nir_intrinsic_load_interpolated_input
||
53 intrin
->intrinsic
== nir_intrinsic_load_input
||
54 intrin
->intrinsic
== nir_intrinsic_load_per_vertex_input
))
58 if ((options
& nir_move_const_undef
) && instr
->type
== nir_instr_type_ssa_undef
) {
62 if ((options
& nir_move_copies
) && instr
->type
== nir_instr_type_alu
&&
63 nir_instr_as_alu(instr
)->op
== nir_op_mov
) {
67 if ((options
& nir_move_comparisons
) && instr
->type
== nir_instr_type_alu
&&
68 nir_alu_instr_is_comparison(nir_instr_as_alu(instr
))) {
76 get_innermost_loop(nir_cf_node
*node
)
78 for (; node
!= NULL
; node
= node
->parent
) {
79 if (node
->type
== nir_cf_node_loop
)
80 return (nir_loop
*)node
;
86 loop_contains_block(nir_loop
*loop
, nir_block
*block
)
88 nir_block
*before
= nir_cf_node_as_block(nir_cf_node_prev(&loop
->cf_node
));
89 nir_block
*after
= nir_cf_node_as_block(nir_cf_node_next(&loop
->cf_node
));
91 return block
->index
> before
->index
&& block
->index
< after
->index
;
94 /* Given the LCA of all uses and the definition, find a block on the path
95 * between them in the dominance tree that is outside of as many loops as
96 * possible. If "sink_out_of_loops" is false, then we disallow sinking the
97 * definition outside of the loop it's defined in (if any).
101 adjust_block_for_loops(nir_block
*use_block
, nir_block
*def_block
,
102 bool sink_out_of_loops
)
104 nir_loop
*def_loop
= NULL
;
105 if (!sink_out_of_loops
)
106 def_loop
= get_innermost_loop(&def_block
->cf_node
);
108 for (nir_block
*cur_block
= use_block
; cur_block
!= def_block
->imm_dom
;
109 cur_block
= cur_block
->imm_dom
) {
110 if (!sink_out_of_loops
&& def_loop
&&
111 !loop_contains_block(def_loop
, use_block
)) {
112 use_block
= cur_block
;
116 nir_cf_node
*next
= nir_cf_node_next(&cur_block
->cf_node
);
117 if (next
&& next
->type
== nir_cf_node_loop
) {
118 nir_loop
*following_loop
= nir_cf_node_as_loop(next
);
119 if (loop_contains_block(following_loop
, use_block
)) {
120 use_block
= cur_block
;
129 /* iterate a ssa def's use's and try to find a more optimal block to
130 * move it to, using the dominance tree. In short, if all of the uses
131 * are contained in a single block, the load will be moved there,
132 * otherwise it will be move to the least common ancestor block of all
136 get_preferred_block(nir_ssa_def
*def
, bool sink_into_loops
, bool sink_out_of_loops
)
138 nir_block
*lca
= NULL
;
140 nir_foreach_use(use
, def
) {
141 nir_instr
*instr
= use
->parent_instr
;
142 nir_block
*use_block
= instr
->block
;
145 * Kind of an ugly special-case, but phi instructions
146 * need to appear first in the block, so by definition
147 * we can't move an instruction into a block where it is
148 * consumed by a phi instruction. We could conceivably
149 * move it into a dominator block.
151 if (instr
->type
== nir_instr_type_phi
) {
152 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
153 nir_block
*phi_lca
= NULL
;
154 nir_foreach_phi_src(src
, phi
) {
155 if (&src
->src
== use
)
156 phi_lca
= nir_dominance_lca(phi_lca
, src
->pred
);
161 lca
= nir_dominance_lca(lca
, use_block
);
164 nir_foreach_if_use(use
, def
) {
165 nir_block
*use_block
=
166 nir_cf_node_as_block(nir_cf_node_prev(&use
->parent_if
->cf_node
));
168 lca
= nir_dominance_lca(lca
, use_block
);
171 /* If we're moving a load_ubo or load_interpolated_input, we don't want to
172 * sink it down into loops, which may result in accessing memory or shared
173 * functions multiple times. Sink it just above the start of the loop
174 * where it's used. For load_consts, undefs, and comparisons, we expect
175 * the driver to be able to emit them as simple ALU ops, so sinking as far
176 * in as we can go is probably worth it for register pressure.
178 if (!sink_into_loops
) {
179 lca
= adjust_block_for_loops(lca
, def
->parent_instr
->block
,
181 assert(nir_block_dominates(def
->parent_instr
->block
, lca
));
183 /* sink_into_loops = true and sink_out_of_loops = false isn't
184 * implemented yet because it's not used.
186 assert(sink_out_of_loops
);
193 /* insert before first non-phi instruction: */
195 insert_after_phi(nir_instr
*instr
, nir_block
*block
)
197 nir_foreach_instr(instr2
, block
) {
198 if (instr2
->type
== nir_instr_type_phi
)
201 exec_node_insert_node_before(&instr2
->node
,
207 /* if haven't inserted it, push to tail (ie. empty block or possibly
208 * a block only containing phi's?)
210 exec_list_push_tail(&block
->instr_list
, &instr
->node
);
214 nir_opt_sink(nir_shader
*shader
, nir_move_options options
)
216 bool progress
= false;
218 nir_foreach_function(function
, shader
) {
222 nir_metadata_require(function
->impl
,
223 nir_metadata_block_index
| nir_metadata_dominance
);
225 nir_foreach_block_reverse(block
, function
->impl
) {
226 nir_foreach_instr_reverse_safe(instr
, block
) {
227 if (!nir_can_move_instr(instr
, options
))
230 nir_ssa_def
*def
= nir_instr_ssa_def(instr
);
232 bool sink_into_loops
= instr
->type
!= nir_instr_type_intrinsic
;
233 /* Don't sink load_ubo out of loops because that can make its
234 * resource divergent and break code like that which is generated
235 * by nir_lower_non_uniform_access.
237 bool sink_out_of_loops
=
238 instr
->type
!= nir_instr_type_intrinsic
||
239 nir_instr_as_intrinsic(instr
)->intrinsic
!= nir_intrinsic_load_ubo
;
240 nir_block
*use_block
=
241 get_preferred_block(def
, sink_into_loops
, sink_out_of_loops
);
243 if (!use_block
|| use_block
== instr
->block
)
246 exec_node_remove(&instr
->node
);
248 insert_after_phi(instr
, use_block
);
250 instr
->block
= use_block
;
256 nir_metadata_preserve(function
->impl
,
257 nir_metadata_block_index
| nir_metadata_dominance
);