/*
 * Copyright © 2018 Red Hat
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark (robdclark@gmail.com)
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *    Rhys Perry (pendingchaos02@gmail.com)
 */
/*
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
 */
40 nir_can_move_instr(nir_instr
*instr
, nir_move_options options
)
42 switch (instr
->type
) {
43 case nir_instr_type_load_const
:
44 case nir_instr_type_ssa_undef
: {
45 return options
& nir_move_const_undef
;
47 case nir_instr_type_alu
: {
48 if (nir_op_is_vec(nir_instr_as_alu(instr
)->op
))
49 return options
& nir_move_copies
;
50 if (nir_alu_instr_is_comparison(nir_instr_as_alu(instr
)))
51 return options
& nir_move_comparisons
;
54 case nir_instr_type_intrinsic
: {
55 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
56 switch (intrin
->intrinsic
) {
57 case nir_intrinsic_load_ubo
:
58 return options
& nir_move_load_ubo
;
59 case nir_intrinsic_load_input
:
60 case nir_intrinsic_load_interpolated_input
:
61 case nir_intrinsic_load_per_vertex_input
:
62 return options
& nir_move_load_input
;
73 get_innermost_loop(nir_cf_node
*node
)
75 for (; node
!= NULL
; node
= node
->parent
) {
76 if (node
->type
== nir_cf_node_loop
)
77 return (nir_loop
*)node
;
83 loop_contains_block(nir_loop
*loop
, nir_block
*block
)
85 nir_block
*before
= nir_cf_node_as_block(nir_cf_node_prev(&loop
->cf_node
));
86 nir_block
*after
= nir_cf_node_as_block(nir_cf_node_next(&loop
->cf_node
));
88 return block
->index
> before
->index
&& block
->index
< after
->index
;
91 /* Given the LCA of all uses and the definition, find a block on the path
92 * between them in the dominance tree that is outside of as many loops as
93 * possible. If "sink_out_of_loops" is false, then we disallow sinking the
94 * definition outside of the loop it's defined in (if any).
98 adjust_block_for_loops(nir_block
*use_block
, nir_block
*def_block
,
99 bool sink_out_of_loops
)
101 nir_loop
*def_loop
= NULL
;
102 if (!sink_out_of_loops
)
103 def_loop
= get_innermost_loop(&def_block
->cf_node
);
105 for (nir_block
*cur_block
= use_block
; cur_block
!= def_block
->imm_dom
;
106 cur_block
= cur_block
->imm_dom
) {
107 if (!sink_out_of_loops
&& def_loop
&&
108 !loop_contains_block(def_loop
, use_block
)) {
109 use_block
= cur_block
;
113 nir_cf_node
*next
= nir_cf_node_next(&cur_block
->cf_node
);
114 if (next
&& next
->type
== nir_cf_node_loop
) {
115 nir_loop
*following_loop
= nir_cf_node_as_loop(next
);
116 if (loop_contains_block(following_loop
, use_block
)) {
117 use_block
= cur_block
;
126 /* iterate a ssa def's use's and try to find a more optimal block to
127 * move it to, using the dominance tree. In short, if all of the uses
128 * are contained in a single block, the load will be moved there,
129 * otherwise it will be move to the least common ancestor block of all
133 get_preferred_block(nir_ssa_def
*def
, bool sink_into_loops
, bool sink_out_of_loops
)
135 nir_block
*lca
= NULL
;
137 nir_foreach_use(use
, def
) {
138 nir_instr
*instr
= use
->parent_instr
;
139 nir_block
*use_block
= instr
->block
;
142 * Kind of an ugly special-case, but phi instructions
143 * need to appear first in the block, so by definition
144 * we can't move an instruction into a block where it is
145 * consumed by a phi instruction. We could conceivably
146 * move it into a dominator block.
148 if (instr
->type
== nir_instr_type_phi
) {
149 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
150 nir_block
*phi_lca
= NULL
;
151 nir_foreach_phi_src(src
, phi
) {
152 if (&src
->src
== use
)
153 phi_lca
= nir_dominance_lca(phi_lca
, src
->pred
);
158 lca
= nir_dominance_lca(lca
, use_block
);
161 nir_foreach_if_use(use
, def
) {
162 nir_block
*use_block
=
163 nir_cf_node_as_block(nir_cf_node_prev(&use
->parent_if
->cf_node
));
165 lca
= nir_dominance_lca(lca
, use_block
);
168 /* If we're moving a load_ubo or load_interpolated_input, we don't want to
169 * sink it down into loops, which may result in accessing memory or shared
170 * functions multiple times. Sink it just above the start of the loop
171 * where it's used. For load_consts, undefs, and comparisons, we expect
172 * the driver to be able to emit them as simple ALU ops, so sinking as far
173 * in as we can go is probably worth it for register pressure.
175 if (!sink_into_loops
) {
176 lca
= adjust_block_for_loops(lca
, def
->parent_instr
->block
,
178 assert(nir_block_dominates(def
->parent_instr
->block
, lca
));
180 /* sink_into_loops = true and sink_out_of_loops = false isn't
181 * implemented yet because it's not used.
183 assert(sink_out_of_loops
);
190 /* insert before first non-phi instruction: */
192 insert_after_phi(nir_instr
*instr
, nir_block
*block
)
194 nir_foreach_instr(instr2
, block
) {
195 if (instr2
->type
== nir_instr_type_phi
)
198 exec_node_insert_node_before(&instr2
->node
,
204 /* if haven't inserted it, push to tail (ie. empty block or possibly
205 * a block only containing phi's?)
207 exec_list_push_tail(&block
->instr_list
, &instr
->node
);
211 nir_opt_sink(nir_shader
*shader
, nir_move_options options
)
213 bool progress
= false;
215 nir_foreach_function(function
, shader
) {
219 nir_metadata_require(function
->impl
,
220 nir_metadata_block_index
| nir_metadata_dominance
);
222 nir_foreach_block_reverse(block
, function
->impl
) {
223 nir_foreach_instr_reverse_safe(instr
, block
) {
224 if (!nir_can_move_instr(instr
, options
))
227 nir_ssa_def
*def
= nir_instr_ssa_def(instr
);
229 bool sink_into_loops
= instr
->type
!= nir_instr_type_intrinsic
;
230 /* Don't sink load_ubo out of loops because that can make its
231 * resource divergent and break code like that which is generated
232 * by nir_lower_non_uniform_access.
234 bool sink_out_of_loops
=
235 instr
->type
!= nir_instr_type_intrinsic
||
236 nir_instr_as_intrinsic(instr
)->intrinsic
!= nir_intrinsic_load_ubo
;
237 nir_block
*use_block
=
238 get_preferred_block(def
, sink_into_loops
, sink_out_of_loops
);
240 if (!use_block
|| use_block
== instr
->block
)
243 exec_node_remove(&instr
->node
);
245 insert_after_phi(instr
, use_block
);
247 instr
->block
= use_block
;
253 nir_metadata_preserve(function
->impl
,
254 nir_metadata_block_index
| nir_metadata_dominance
);