nir: fix memleak in error path
[mesa.git] / src / compiler / nir / nir_opt_sink.c
1 /*
2 * Copyright © 2018 Red Hat
3 * Copyright © 2019 Valve Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 * Authors:
 *    Rob Clark <robdclark@gmail.com>
26 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
27 * Rhys Perry (pendingchaos02@gmail.com)
28 *
29 */
30
31 #include "nir.h"
32
33
/*
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
 */
38
39 bool
40 nir_can_move_instr(nir_instr *instr, nir_move_options options)
41 {
42 if ((options & nir_move_const_undef) && instr->type == nir_instr_type_load_const) {
43 return true;
44 }
45
46 if (instr->type == nir_instr_type_intrinsic) {
47 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
48 if ((options & nir_move_load_ubo) && intrin->intrinsic == nir_intrinsic_load_ubo)
49 return true;
50
51 if ((options & nir_move_load_input) &&
52 (intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
53 intrin->intrinsic == nir_intrinsic_load_input))
54 return true;
55 }
56
57 if ((options & nir_move_const_undef) && instr->type == nir_instr_type_ssa_undef) {
58 return true;
59 }
60
61 if ((options & nir_move_comparisons) && instr->type == nir_instr_type_alu &&
62 nir_alu_instr_is_comparison(nir_instr_as_alu(instr))) {
63 return true;
64 }
65
66 return false;
67 }
68
69 static nir_loop *
70 get_innermost_loop(nir_cf_node *node)
71 {
72 for (; node != NULL; node = node->parent) {
73 if (node->type == nir_cf_node_loop)
74 return (nir_loop*)node;
75 }
76 return NULL;
77 }
78
79 /* return last block not after use_block with def_loop as it's innermost loop */
80 static nir_block *
81 adjust_block_for_loops(nir_block *use_block, nir_loop *def_loop)
82 {
83 nir_loop *use_loop = NULL;
84
85 for (nir_cf_node *node = &use_block->cf_node; node != NULL; node = node->parent) {
86 if (def_loop && node == &def_loop->cf_node)
87 break;
88 if (node->type == nir_cf_node_loop)
89 use_loop = nir_cf_node_as_loop(node);
90 }
91 if (use_loop) {
92 return nir_block_cf_tree_prev(nir_loop_first_block(use_loop));
93 } else {
94 return use_block;
95 }
96 }
97
/* Iterate over an SSA def's uses and try to find a more optimal block to
 * move it to, using the dominance tree.  In short, if all of the uses are
 * contained in a single block, the load will be moved there; otherwise it
 * will be moved to the least common ancestor block of all the uses.
 *
 * Returns NULL when the def has no (non-phi-resolvable) uses at all.
 */
static nir_block *
get_preferred_block(nir_ssa_def *def, bool sink_into_loops)
{
   nir_block *lca = NULL;

   /* When we must not sink into loops, remember the def's innermost loop so
    * uses inside deeper loops can be hoisted back out to just above them.
    */
   nir_loop *def_loop = NULL;
   if (!sink_into_loops)
      def_loop = get_innermost_loop(&def->parent_instr->block->cf_node);

   nir_foreach_use(use, def) {
      nir_instr *instr = use->parent_instr;
      nir_block *use_block = instr->block;

      /*
       * Kind of an ugly special-case, but phi instructions
       * need to appear first in the block, so by definition
       * we can't move an instruction into a block where it is
       * consumed by a phi instruction. We could conceivably
       * move it into a dominator block.
       */
      if (instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(instr);
         nir_block *phi_lca = NULL;
         /* Treat the use as happening in the predecessor block(s) that feed
          * this phi source, rather than in the phi's own block.
          */
         nir_foreach_phi_src(src, phi) {
            if (&src->src == use)
               phi_lca = nir_dominance_lca(phi_lca, src->pred);
         }
         use_block = phi_lca;
      }

      /* If we're moving a load_ubo or load_interpolated_input, we don't want to
       * sink it down into loops, which may result in accessing memory or shared
       * functions multiple times.  Sink it just above the start of the loop
       * where it's used.  For load_consts, undefs, and comparisons, we expect
       * the driver to be able to emit them as simple ALU ops, so sinking as far
       * in as we can go is probably worth it for register pressure.
       */
      if (!sink_into_loops) {
         use_block = adjust_block_for_loops(use_block, def_loop);
         assert(nir_block_dominates(def->parent_instr->block, use_block));
      }

      lca = nir_dominance_lca(lca, use_block);
   }

   /* Uses as an if-condition happen at the end of the block preceding the
    * if, so that's where the def must be available.
    */
   nir_foreach_if_use(use, def) {
      nir_block *use_block =
         nir_cf_node_as_block(nir_cf_node_prev(&use->parent_if->cf_node));

      if (!sink_into_loops) {
         use_block = adjust_block_for_loops(use_block, def_loop);
         assert(nir_block_dominates(def->parent_instr->block, use_block));
      }

      lca = nir_dominance_lca(lca, use_block);
   }

   return lca;
}
163
164 /* insert before first non-phi instruction: */
165 static void
166 insert_after_phi(nir_instr *instr, nir_block *block)
167 {
168 nir_foreach_instr(instr2, block) {
169 if (instr2->type == nir_instr_type_phi)
170 continue;
171
172 exec_node_insert_node_before(&instr2->node,
173 &instr->node);
174
175 return;
176 }
177
178 /* if haven't inserted it, push to tail (ie. empty block or possibly
179 * a block only containing phi's?)
180 */
181 exec_list_push_tail(&block->instr_list, &instr->node);
182 }
183
184 bool
185 nir_opt_sink(nir_shader *shader, nir_move_options options)
186 {
187 bool progress = false;
188
189 nir_foreach_function(function, shader) {
190 if (!function->impl)
191 continue;
192
193 nir_metadata_require(function->impl,
194 nir_metadata_block_index | nir_metadata_dominance);
195
196 nir_foreach_block_reverse(block, function->impl) {
197 nir_foreach_instr_reverse_safe(instr, block) {
198 if (!nir_can_move_instr(instr, options))
199 continue;
200
201 nir_ssa_def *def = nir_instr_ssa_def(instr);
202 nir_block *use_block =
203 get_preferred_block(def, instr->type != nir_instr_type_intrinsic);
204
205 if (!use_block || use_block == instr->block)
206 continue;
207
208 exec_node_remove(&instr->node);
209
210 insert_after_phi(instr, use_block);
211
212 instr->block = use_block;
213
214 progress = true;
215 }
216 }
217
218 nir_metadata_preserve(function->impl,
219 nir_metadata_block_index | nir_metadata_dominance);
220 }
221
222 return progress;
223 }