nir: rename nir_foreach_block*() to nir_foreach_block*_call()
[mesa.git] / src / compiler / nir / nir_lower_vec_to_movs.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "nir.h"
29
30 /*
31 * Implements a simple pass that lowers vecN instructions to a series of
32 * moves with partial writes.
33 */
34
/* Per-pass state threaded through the block-walk callback. */
struct vec_to_movs_state {
   nir_function_impl *impl; /* function being lowered; new registers are created here */
   bool progress;           /* set to true when any vecN instruction is rewritten */
};
39
40 static bool
41 src_matches_dest_reg(nir_dest *dest, nir_src *src)
42 {
43 if (dest->is_ssa || src->is_ssa)
44 return false;
45
46 return (dest->reg.reg == src->reg.reg &&
47 dest->reg.base_offset == src->reg.base_offset &&
48 !dest->reg.indirect &&
49 !src->reg.indirect);
50 }
51
/**
 * For a given starting writemask channel and corresponding source index in
 * the vec instruction, insert a MOV to the vec instruction's dest of all the
 * writemask channels that get read from the same src reg.
 *
 * Returns the writemask of our MOV, so the parent loop calling this knows
 * which ones have been processed.
 */
static unsigned
insert_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* The MOV copies the vec's destination (its writemask is overwritten
    * below) and moves the start_idx'th vec source, carrying over that
    * source's modifiers.
    */
   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
   nir_alu_dest_copy(&mov->dest, &vec->dest, mov);

   /* Write only the start_idx channel, reading the scalar component the vec
    * source selects (vec sources are scalar, so only swizzle[0] matters).
    */
   mov->dest.write_mask = (1u << start_idx);
   mov->src[0].swizzle[start_idx] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   /* Fold any later destination channels that read the same source value
    * with identical modifiers into this same MOV, each with its own
    * swizzle component.
    */
   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         mov->dest.write_mask |= (1 << i);
         mov->src[0].swizzle[i] = vec->src[i].swizzle[0];
      }
   }

   /* Report all channels we took responsibility for, even those pruned as
    * no-op self-moves below — they still need no further MOV.
    */
   unsigned channels_handled = mov->dest.write_mask;

   /* In some situations (if the vecN is involved in a phi-web), we can end
    * up with a mov from a register to itself.  Some of those channels may end
    * up doing nothing and there's no reason to have them as part of the mov.
    */
   if (src_matches_dest_reg(&mov->dest.dest, &mov->src[0].src) &&
       !mov->src[0].abs && !mov->src[0].negate) {
      for (unsigned i = 0; i < 4; i++) {
         /* Channel i reading component i of the same register is a no-op. */
         if (mov->src[0].swizzle[i] == i) {
            mov->dest.write_mask &= ~(1 << i);
         }
      }
   }

   /* Only emit the instruction if it actually does something */
   if (mov->dest.write_mask) {
      nir_instr_insert_before(&vec->instr, &mov->instr);
   } else {
      ralloc_free(mov);
   }

   return channels_handled;
}
110
111 static bool
112 has_replicated_dest(nir_alu_instr *alu)
113 {
114 return alu->op == nir_op_fdot_replicated2 ||
115 alu->op == nir_op_fdot_replicated3 ||
116 alu->op == nir_op_fdot_replicated4 ||
117 alu->op == nir_op_fdph_replicated;
118 }
119
/* Attempts to coalesce the "move" from the given source of the vec to the
 * destination of the instruction generating the value.  If, for whatever
 * reason, we cannot coalesce the move, it does nothing and returns 0.  We
 * can then call insert_mov as normal.
 */
static unsigned
try_coalesce(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* We will only even try if the source is SSA */
   if (!vec->src[start_idx].src.is_ssa)
      return 0;

   assert(vec->src[start_idx].src.ssa);

   /* If we are going to do a reswizzle, then the vecN operation must be the
    * only use of the source value.  We also can't have any source modifiers.
    */
   nir_foreach_use(vec->src[start_idx].src.ssa, src) {
      if (src->parent_instr != &vec->instr)
         return 0;

      nir_alu_src *alu_src = exec_node_data(nir_alu_src, src, src);
      if (alu_src->abs || alu_src->negate)
         return 0;
   }

   /* A use in an if-condition is another use we cannot rewrite. */
   if (!list_empty(&vec->src[start_idx].src.ssa->if_uses))
      return 0;

   /* We can only coalesce into the destination of an ALU instruction. */
   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *src_alu =
      nir_instr_as_alu(vec->src[start_idx].src.ssa->parent_instr);

   if (has_replicated_dest(src_alu)) {
      /* The fdot instruction is special: It replicates its result to all
       * components.  This means that we can always rewrite its destination
       * and we don't need to swizzle anything.
       */
   } else {
      /* We only care about being able to re-swizzle the instruction if it is
       * something that we can reswizzle.  It must be per-component.  The one
       * exception to this is the fdotN instructions which implicitly splat
       * their result out to all channels.
       */
      if (nir_op_infos[src_alu->op].output_size != 0)
         return 0;

      /* If we are going to reswizzle the instruction, we can't have any
       * non-per-component sources either.
       */
      for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
         if (nir_op_infos[src_alu->op].input_sizes[j] != 0)
            return 0;
   }

   /* Stash off all of the ALU instruction's swizzles.  We need the original
    * values because the loop below rewrites them in place.
    */
   uint8_t swizzles[4][4];
   for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
      for (unsigned i = 0; i < 4; i++)
         swizzles[j][i] = src_alu->src[j].swizzle[i];

   /* Collect every vec channel fed by src_alu and reswizzle src_alu so it
    * writes each of those channels directly.
    */
   unsigned write_mask = 0;
   for (unsigned i = start_idx; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (!vec->src[i].src.is_ssa ||
          vec->src[i].src.ssa != &src_alu->dest.dest.ssa)
         continue;

      /* At this point, the given vec source matches up with the ALU
       * instruction so we can re-swizzle that component to match.
       */
      write_mask |= 1 << i;
      if (has_replicated_dest(src_alu)) {
         /* Since the destination is a single replicated value, we don't need
          * to do any reswizzling
          */
      } else {
         for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
            src_alu->src[j].swizzle[i] = swizzles[j][vec->src[i].swizzle[0]];
      }

      /* Clear the no longer needed vec source */
      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, NIR_SRC_INIT);
   }

   /* Retarget src_alu at the vec's destination, restricted to the channels
    * we coalesced; the caller marks those channels as handled.
    */
   nir_instr_rewrite_dest(&src_alu->instr, &src_alu->dest.dest, vec->dest.dest);
   src_alu->dest.write_mask = write_mask;

   return write_mask;
}
216
217 static bool
218 lower_vec_to_movs_block(nir_block *block, void *void_state)
219 {
220 struct vec_to_movs_state *state = void_state;
221 nir_function_impl *impl = state->impl;
222 nir_shader *shader = impl->function->shader;
223
224 nir_foreach_instr_safe(block, instr) {
225 if (instr->type != nir_instr_type_alu)
226 continue;
227
228 nir_alu_instr *vec = nir_instr_as_alu(instr);
229
230 switch (vec->op) {
231 case nir_op_vec2:
232 case nir_op_vec3:
233 case nir_op_vec4:
234 break;
235 default:
236 continue; /* The loop */
237 }
238
239 if (vec->dest.dest.is_ssa) {
240 /* Since we insert multiple MOVs, we have a register destination. */
241 nir_register *reg = nir_local_reg_create(impl);
242 reg->num_components = vec->dest.dest.ssa.num_components;
243 reg->bit_size = vec->dest.dest.ssa.bit_size;
244
245 nir_ssa_def_rewrite_uses(&vec->dest.dest.ssa, nir_src_for_reg(reg));
246
247 nir_instr_rewrite_dest(&vec->instr, &vec->dest.dest,
248 nir_dest_for_reg(reg));
249 }
250
251 unsigned finished_write_mask = 0;
252
253 /* First, emit a MOV for all the src channels that are in the
254 * destination reg, in case other values we're populating in the dest
255 * might overwrite them.
256 */
257 for (unsigned i = 0; i < 4; i++) {
258 if (!(vec->dest.write_mask & (1 << i)))
259 continue;
260
261 if (src_matches_dest_reg(&vec->dest.dest, &vec->src[i].src)) {
262 finished_write_mask |= insert_mov(vec, i, shader);
263 break;
264 }
265 }
266
267 /* Now, emit MOVs for all the other src channels. */
268 for (unsigned i = 0; i < 4; i++) {
269 if (!(vec->dest.write_mask & (1 << i)))
270 continue;
271
272 if (!(finished_write_mask & (1 << i)))
273 finished_write_mask |= try_coalesce(vec, i, shader);
274
275 if (!(finished_write_mask & (1 << i)))
276 finished_write_mask |= insert_mov(vec, i, shader);
277 }
278
279 nir_instr_remove(&vec->instr);
280 ralloc_free(vec);
281 state->progress = true;
282 }
283
284 return true;
285 }
286
287 static bool
288 nir_lower_vec_to_movs_impl(nir_function_impl *impl)
289 {
290 struct vec_to_movs_state state = { impl, false };
291
292 nir_foreach_block_call(impl, lower_vec_to_movs_block, &state);
293
294 if (state.progress) {
295 nir_metadata_preserve(impl, nir_metadata_block_index |
296 nir_metadata_dominance);
297 }
298
299 return state.progress;
300 }
301
302 bool
303 nir_lower_vec_to_movs(nir_shader *shader)
304 {
305 bool progress = false;
306
307 nir_foreach_function(shader, function) {
308 if (function->impl)
309 progress = nir_lower_vec_to_movs_impl(function->impl) || progress;
310 }
311
312 return progress;
313 }