/*
 * Copyright © 2018 Red Hat
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robdclark@gmail.com>
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *    Rhys Perry (pendingchaos02@gmail.com)
 */

/*
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
 */
40 nir_can_move_instr(nir_instr
*instr
, nir_move_options options
)
42 switch (instr
->type
) {
43 case nir_instr_type_load_const
:
44 case nir_instr_type_ssa_undef
: {
45 return options
& nir_move_const_undef
;
47 case nir_instr_type_alu
: {
48 if (nir_op_is_vec(nir_instr_as_alu(instr
)->op
) ||
49 nir_instr_as_alu(instr
)->op
== nir_op_b2i32
)
50 return options
& nir_move_copies
;
51 if (nir_alu_instr_is_comparison(nir_instr_as_alu(instr
)))
52 return options
& nir_move_comparisons
;
55 case nir_instr_type_intrinsic
: {
56 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
57 switch (intrin
->intrinsic
) {
58 case nir_intrinsic_load_ubo
:
59 return options
& nir_move_load_ubo
;
60 case nir_intrinsic_load_input
:
61 case nir_intrinsic_load_interpolated_input
:
62 case nir_intrinsic_load_per_vertex_input
:
63 return options
& nir_move_load_input
;
74 get_innermost_loop(nir_cf_node
*node
)
76 for (; node
!= NULL
; node
= node
->parent
) {
77 if (node
->type
== nir_cf_node_loop
)
78 return (nir_loop
*)node
;
84 loop_contains_block(nir_loop
*loop
, nir_block
*block
)
86 nir_block
*before
= nir_cf_node_as_block(nir_cf_node_prev(&loop
->cf_node
));
87 nir_block
*after
= nir_cf_node_as_block(nir_cf_node_next(&loop
->cf_node
));
89 return block
->index
> before
->index
&& block
->index
< after
->index
;
92 /* Given the LCA of all uses and the definition, find a block on the path
93 * between them in the dominance tree that is outside of as many loops as
94 * possible. If "sink_out_of_loops" is false, then we disallow sinking the
95 * definition outside of the loop it's defined in (if any).
99 adjust_block_for_loops(nir_block
*use_block
, nir_block
*def_block
,
100 bool sink_out_of_loops
)
102 nir_loop
*def_loop
= NULL
;
103 if (!sink_out_of_loops
)
104 def_loop
= get_innermost_loop(&def_block
->cf_node
);
106 for (nir_block
*cur_block
= use_block
; cur_block
!= def_block
->imm_dom
;
107 cur_block
= cur_block
->imm_dom
) {
108 if (!sink_out_of_loops
&& def_loop
&&
109 !loop_contains_block(def_loop
, use_block
)) {
110 use_block
= cur_block
;
114 nir_cf_node
*next
= nir_cf_node_next(&cur_block
->cf_node
);
115 if (next
&& next
->type
== nir_cf_node_loop
) {
116 nir_loop
*following_loop
= nir_cf_node_as_loop(next
);
117 if (loop_contains_block(following_loop
, use_block
)) {
118 use_block
= cur_block
;
127 /* iterate a ssa def's use's and try to find a more optimal block to
128 * move it to, using the dominance tree. In short, if all of the uses
129 * are contained in a single block, the load will be moved there,
130 * otherwise it will be move to the least common ancestor block of all
134 get_preferred_block(nir_ssa_def
*def
, bool sink_into_loops
, bool sink_out_of_loops
)
136 nir_block
*lca
= NULL
;
138 nir_foreach_use(use
, def
) {
139 nir_instr
*instr
= use
->parent_instr
;
140 nir_block
*use_block
= instr
->block
;
143 * Kind of an ugly special-case, but phi instructions
144 * need to appear first in the block, so by definition
145 * we can't move an instruction into a block where it is
146 * consumed by a phi instruction. We could conceivably
147 * move it into a dominator block.
149 if (instr
->type
== nir_instr_type_phi
) {
150 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
151 nir_block
*phi_lca
= NULL
;
152 nir_foreach_phi_src(src
, phi
) {
153 if (&src
->src
== use
)
154 phi_lca
= nir_dominance_lca(phi_lca
, src
->pred
);
159 lca
= nir_dominance_lca(lca
, use_block
);
162 nir_foreach_if_use(use
, def
) {
163 nir_block
*use_block
=
164 nir_cf_node_as_block(nir_cf_node_prev(&use
->parent_if
->cf_node
));
166 lca
= nir_dominance_lca(lca
, use_block
);
169 /* If we're moving a load_ubo or load_interpolated_input, we don't want to
170 * sink it down into loops, which may result in accessing memory or shared
171 * functions multiple times. Sink it just above the start of the loop
172 * where it's used. For load_consts, undefs, and comparisons, we expect
173 * the driver to be able to emit them as simple ALU ops, so sinking as far
174 * in as we can go is probably worth it for register pressure.
176 if (!sink_into_loops
) {
177 lca
= adjust_block_for_loops(lca
, def
->parent_instr
->block
,
179 assert(nir_block_dominates(def
->parent_instr
->block
, lca
));
181 /* sink_into_loops = true and sink_out_of_loops = false isn't
182 * implemented yet because it's not used.
184 assert(sink_out_of_loops
);
191 /* insert before first non-phi instruction: */
193 insert_after_phi(nir_instr
*instr
, nir_block
*block
)
195 nir_foreach_instr(instr2
, block
) {
196 if (instr2
->type
== nir_instr_type_phi
)
199 exec_node_insert_node_before(&instr2
->node
,
205 /* if haven't inserted it, push to tail (ie. empty block or possibly
206 * a block only containing phi's?)
208 exec_list_push_tail(&block
->instr_list
, &instr
->node
);
212 nir_opt_sink(nir_shader
*shader
, nir_move_options options
)
214 bool progress
= false;
216 nir_foreach_function(function
, shader
) {
220 nir_metadata_require(function
->impl
,
221 nir_metadata_block_index
| nir_metadata_dominance
);
223 nir_foreach_block_reverse(block
, function
->impl
) {
224 nir_foreach_instr_reverse_safe(instr
, block
) {
225 if (!nir_can_move_instr(instr
, options
))
228 nir_ssa_def
*def
= nir_instr_ssa_def(instr
);
230 bool sink_into_loops
= instr
->type
!= nir_instr_type_intrinsic
;
231 /* Don't sink load_ubo out of loops because that can make its
232 * resource divergent and break code like that which is generated
233 * by nir_lower_non_uniform_access.
235 bool sink_out_of_loops
=
236 instr
->type
!= nir_instr_type_intrinsic
||
237 nir_instr_as_intrinsic(instr
)->intrinsic
!= nir_intrinsic_load_ubo
;
238 nir_block
*use_block
=
239 get_preferred_block(def
, sink_into_loops
, sink_out_of_loops
);
241 if (!use_block
|| use_block
== instr
->block
)
244 exec_node_remove(&instr
->node
);
246 insert_after_phi(instr
, use_block
);
248 instr
->block
= use_block
;
254 nir_metadata_preserve(function
->impl
,
255 nir_metadata_block_index
| nir_metadata_dominance
);