2 * Copyright © 2018 Red Hat
3 * Copyright © 2019 Valve Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * Rob Clark <robdclark@gmail.com>
26 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
27 * Rhys Perry (pendingchaos02@gmail.com)
35 * A simple pass that moves some instructions into the least common
36 * ancestor of consuming instructions.
40 nir_can_move_instr(nir_instr
*instr
, nir_move_options options
)
42 if ((options
& nir_move_const_undef
) && instr
->type
== nir_instr_type_load_const
) {
46 if (instr
->type
== nir_instr_type_intrinsic
) {
47 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
48 if ((options
& nir_move_load_ubo
) && intrin
->intrinsic
== nir_intrinsic_load_ubo
)
51 if ((options
& nir_move_load_input
) &&
52 (intrin
->intrinsic
== nir_intrinsic_load_interpolated_input
||
53 intrin
->intrinsic
== nir_intrinsic_load_input
))
57 if ((options
& nir_move_const_undef
) && instr
->type
== nir_instr_type_ssa_undef
) {
61 if ((options
& nir_move_comparisons
) && instr
->type
== nir_instr_type_alu
&&
62 nir_alu_instr_is_comparison(nir_instr_as_alu(instr
))) {
70 get_innermost_loop(nir_cf_node
*node
)
72 for (; node
!= NULL
; node
= node
->parent
) {
73 if (node
->type
== nir_cf_node_loop
)
74 return (nir_loop
*)node
;
80 loop_contains_block(nir_loop
*loop
, nir_block
*block
)
82 nir_block
*before
= nir_cf_node_as_block(nir_cf_node_prev(&loop
->cf_node
));
83 nir_block
*after
= nir_cf_node_as_block(nir_cf_node_next(&loop
->cf_node
));
85 return block
->index
> before
->index
&& block
->index
< after
->index
;
88 /* Given the LCA of all uses and the definition, find a block on the path
89 * between them in the dominance tree that is outside of as many loops as
90 * possible. If "sink_out_of_loops" is false, then we disallow sinking the
91 * definition outside of the loop it's defined in (if any).
95 adjust_block_for_loops(nir_block
*use_block
, nir_block
*def_block
,
96 bool sink_out_of_loops
)
98 nir_loop
*def_loop
= NULL
;
99 if (!sink_out_of_loops
)
100 def_loop
= get_innermost_loop(&def_block
->cf_node
);
102 for (nir_block
*cur_block
= use_block
; cur_block
!= def_block
->imm_dom
;
103 cur_block
= cur_block
->imm_dom
) {
104 if (!sink_out_of_loops
&& def_loop
&&
105 !loop_contains_block(def_loop
, use_block
)) {
106 use_block
= cur_block
;
110 nir_cf_node
*next
= nir_cf_node_next(&cur_block
->cf_node
);
111 if (next
&& next
->type
== nir_cf_node_loop
) {
112 nir_loop
*following_loop
= nir_cf_node_as_loop(next
);
113 if (loop_contains_block(following_loop
, use_block
)) {
114 use_block
= cur_block
;
123 /* iterate a ssa def's use's and try to find a more optimal block to
124 * move it to, using the dominance tree. In short, if all of the uses
125 * are contained in a single block, the load will be moved there,
126 * otherwise it will be move to the least common ancestor block of all
130 get_preferred_block(nir_ssa_def
*def
, bool sink_into_loops
, bool sink_out_of_loops
)
132 nir_block
*lca
= NULL
;
134 nir_foreach_use(use
, def
) {
135 nir_instr
*instr
= use
->parent_instr
;
136 nir_block
*use_block
= instr
->block
;
139 * Kind of an ugly special-case, but phi instructions
140 * need to appear first in the block, so by definition
141 * we can't move an instruction into a block where it is
142 * consumed by a phi instruction. We could conceivably
143 * move it into a dominator block.
145 if (instr
->type
== nir_instr_type_phi
) {
146 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
147 nir_block
*phi_lca
= NULL
;
148 nir_foreach_phi_src(src
, phi
) {
149 if (&src
->src
== use
)
150 phi_lca
= nir_dominance_lca(phi_lca
, src
->pred
);
155 lca
= nir_dominance_lca(lca
, use_block
);
158 nir_foreach_if_use(use
, def
) {
159 nir_block
*use_block
=
160 nir_cf_node_as_block(nir_cf_node_prev(&use
->parent_if
->cf_node
));
162 lca
= nir_dominance_lca(lca
, use_block
);
165 /* If we're moving a load_ubo or load_interpolated_input, we don't want to
166 * sink it down into loops, which may result in accessing memory or shared
167 * functions multiple times. Sink it just above the start of the loop
168 * where it's used. For load_consts, undefs, and comparisons, we expect
169 * the driver to be able to emit them as simple ALU ops, so sinking as far
170 * in as we can go is probably worth it for register pressure.
172 if (!sink_into_loops
) {
173 lca
= adjust_block_for_loops(lca
, def
->parent_instr
->block
,
175 assert(nir_block_dominates(def
->parent_instr
->block
, lca
));
177 /* sink_into_loops = true and sink_out_of_loops = false isn't
178 * implemented yet because it's not used.
180 assert(sink_out_of_loops
);
187 /* insert before first non-phi instruction: */
189 insert_after_phi(nir_instr
*instr
, nir_block
*block
)
191 nir_foreach_instr(instr2
, block
) {
192 if (instr2
->type
== nir_instr_type_phi
)
195 exec_node_insert_node_before(&instr2
->node
,
201 /* if haven't inserted it, push to tail (ie. empty block or possibly
202 * a block only containing phi's?)
204 exec_list_push_tail(&block
->instr_list
, &instr
->node
);
208 nir_opt_sink(nir_shader
*shader
, nir_move_options options
)
210 bool progress
= false;
212 nir_foreach_function(function
, shader
) {
216 nir_metadata_require(function
->impl
,
217 nir_metadata_block_index
| nir_metadata_dominance
);
219 nir_foreach_block_reverse(block
, function
->impl
) {
220 nir_foreach_instr_reverse_safe(instr
, block
) {
221 if (!nir_can_move_instr(instr
, options
))
224 nir_ssa_def
*def
= nir_instr_ssa_def(instr
);
226 bool sink_into_loops
= instr
->type
!= nir_instr_type_intrinsic
;
227 /* Don't sink load_ubo out of loops because that can make its
228 * resource divergent and break code like that which is generated
229 * by nir_lower_non_uniform_access.
231 bool sink_out_of_loops
=
232 instr
->type
!= nir_instr_type_intrinsic
||
233 nir_instr_as_intrinsic(instr
)->intrinsic
!= nir_intrinsic_load_ubo
;
234 nir_block
*use_block
=
235 get_preferred_block(def
, sink_into_loops
, sink_out_of_loops
);
237 if (!use_block
|| use_block
== instr
->block
)
240 exec_node_remove(&instr
->node
);
242 insert_after_phi(instr
, use_block
);
244 instr
->block
= use_block
;
250 nir_metadata_preserve(function
->impl
,
251 nir_metadata_block_index
| nir_metadata_dominance
);