/*
 * nir/lower_vec_to_movs: Convert to use nir_shader_instructions_pass().
 * [mesa.git] src/compiler/nir/nir_lower_vec_to_movs.c
 */
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "nir.h"
29 #include "nir_builder.h"
30
31 /*
32 * Implements a simple pass that lowers vecN instructions to a series of
33 * moves with partial writes.
34 */
35
36 static bool
37 src_matches_dest_reg(nir_dest *dest, nir_src *src)
38 {
39 if (dest->is_ssa || src->is_ssa)
40 return false;
41
42 return (dest->reg.reg == src->reg.reg &&
43 dest->reg.base_offset == src->reg.base_offset &&
44 !dest->reg.indirect &&
45 !src->reg.indirect);
46 }
47
/**
 * For a given starting writemask channel and corresponding source index in
 * the vec instruction, insert a MOV to the vec instruction's dest of all the
 * writemask channels that get read from the same src reg (with the same
 * modifiers), so a single partial-write MOV covers as many channels as
 * possible.
 *
 * Returns the writemask of our MOV, so the parent loop calling this knows
 * which ones have been processed.  Note the returned mask is computed
 * *before* the self-mov channel pruning below, so already-correct channels
 * are still reported as handled.
 */
static unsigned
insert_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* Start a MOV that reads vec->src[start_idx] and writes channel
    * start_idx of the vec's destination.
    */
   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
   nir_alu_dest_copy(&mov->dest, &vec->dest, mov);

   mov->dest.write_mask = (1u << start_idx);
   /* Each vec source is scalar, so only swizzle component 0 is meaningful. */
   mov->src[0].swizzle[start_idx] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   /* Fold in every later destination channel that reads the same source
    * with identical modifiers; it just becomes another swizzle component of
    * this MOV.
    */
   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         mov->dest.write_mask |= (1 << i);
         mov->src[0].swizzle[i] = vec->src[i].swizzle[0];
      }
   }

   /* Remember the full set of channels we covered before any pruning. */
   unsigned channels_handled = mov->dest.write_mask;

   /* In some situations (if the vecN is involved in a phi-web), we can end
    * up with a mov from a register to itself.  Some of those channels may end
    * up doing nothing and there's no reason to have them as part of the mov.
    */
   if (src_matches_dest_reg(&mov->dest.dest, &mov->src[0].src) &&
       !mov->src[0].abs && !mov->src[0].negate) {
      for (unsigned i = 0; i < 4; i++) {
         /* An identity swizzle on a self-mov writes back the value that is
          * already there; drop that channel.
          */
         if (mov->src[0].swizzle[i] == i) {
            mov->dest.write_mask &= ~(1 << i);
         }
      }
   }

   /* Only emit the instruction if it actually does something */
   if (mov->dest.write_mask) {
      nir_instr_insert_before(&vec->instr, &mov->instr);
   } else {
      ralloc_free(mov);
   }

   return channels_handled;
}
106
107 static bool
108 has_replicated_dest(nir_alu_instr *alu)
109 {
110 return alu->op == nir_op_fdot_replicated2 ||
111 alu->op == nir_op_fdot_replicated3 ||
112 alu->op == nir_op_fdot_replicated4 ||
113 alu->op == nir_op_fdph_replicated;
114 }
115
/* Attempts to coalesce the "move" from the given source of the vec to the
 * destination of the instruction generating the value.  If, for whatever
 * reason, we cannot coalesce the move, it does nothing and returns 0.  We
 * can then call insert_mov as normal.
 */
static unsigned
try_coalesce(nir_alu_instr *vec, unsigned start_idx)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);

   /* We will only ever try if the source is SSA */
   if (!vec->src[start_idx].src.is_ssa)
      return 0;

   assert(vec->src[start_idx].src.ssa);

   /* If we are going to do a reswizzle, then the vecN operation must be the
    * only use of the source value.  We also can't have any source modifiers.
    */
   nir_foreach_use(src, vec->src[start_idx].src.ssa) {
      if (src->parent_instr != &vec->instr)
         return 0;

      /* All uses are in the vec, which is an ALU instruction, so recover
       * the enclosing nir_alu_src to inspect its modifiers.
       */
      nir_alu_src *alu_src = exec_node_data(nir_alu_src, src, src);
      if (alu_src->abs || alu_src->negate)
         return 0;
   }

   /* An if-condition use would survive the coalesce and then read a
    * rewritten destination, so bail in that case too.
    */
   if (!list_is_empty(&vec->src[start_idx].src.ssa->if_uses))
      return 0;

   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *src_alu =
      nir_instr_as_alu(vec->src[start_idx].src.ssa->parent_instr);

   if (has_replicated_dest(src_alu)) {
      /* The fdot instruction is special: It replicates its result to all
       * components.  This means that we can always rewrite its destination
       * and we don't need to swizzle anything.
       */
   } else {
      /* We only care about being able to re-swizzle the instruction if it is
       * something that we can reswizzle.  It must be per-component.  The one
       * exception to this is the fdotN instructions which implicitly splat
       * their result out to all channels.
       */
      if (nir_op_infos[src_alu->op].output_size != 0)
         return 0;

      /* If we are going to reswizzle the instruction, we can't have any
       * non-per-component sources either.
       */
      for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
         if (nir_op_infos[src_alu->op].input_sizes[j] != 0)
            return 0;
   }

   /* Stash off all of the ALU instruction's swizzles.  We reswizzle in
    * place below, so later channels must read the original values.
    */
   uint8_t swizzles[4][4];
   for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
      for (unsigned i = 0; i < 4; i++)
         swizzles[j][i] = src_alu->src[j].swizzle[i];

   unsigned write_mask = 0;
   for (unsigned i = start_idx; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      /* Only channels fed by this same src_alu result can be coalesced. */
      if (!vec->src[i].src.is_ssa ||
          vec->src[i].src.ssa != &src_alu->dest.dest.ssa)
         continue;

      /* At this point, the given vec source matches up with the ALU
       * instruction so we can re-swizzle that component to match.
       */
      write_mask |= 1 << i;
      if (has_replicated_dest(src_alu)) {
         /* Since the destination is a single replicated value, we don't need
          * to do any reswizzling
          */
      } else {
         for (unsigned j = 0; j < nir_op_infos[src_alu->op].num_inputs; j++)
            src_alu->src[j].swizzle[i] = swizzles[j][vec->src[i].swizzle[0]];
      }

      /* Clear the no longer needed vec source */
      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, NIR_SRC_INIT);
   }

   /* Retarget src_alu to write the coalesced channels straight into the
    * vec's (register) destination.
    */
   nir_instr_rewrite_dest(&src_alu->instr, &src_alu->dest.dest, vec->dest.dest);
   src_alu->dest.write_mask = write_mask;

   return write_mask;
}
212
213 static bool
214 nir_lower_vec_to_movs_instr(nir_builder *b, nir_instr *instr, void *data)
215 {
216 if (instr->type != nir_instr_type_alu)
217 return false;
218
219 nir_alu_instr *vec = nir_instr_as_alu(instr);
220
221 switch (vec->op) {
222 case nir_op_vec2:
223 case nir_op_vec3:
224 case nir_op_vec4:
225 break;
226 default:
227 return false;
228 }
229
230 bool vec_had_ssa_dest = vec->dest.dest.is_ssa;
231 if (vec->dest.dest.is_ssa) {
232 /* Since we insert multiple MOVs, we have a register destination. */
233 nir_register *reg = nir_local_reg_create(b->impl);
234 reg->num_components = vec->dest.dest.ssa.num_components;
235 reg->bit_size = vec->dest.dest.ssa.bit_size;
236
237 nir_ssa_def_rewrite_uses(&vec->dest.dest.ssa, nir_src_for_reg(reg));
238
239 nir_instr_rewrite_dest(&vec->instr, &vec->dest.dest,
240 nir_dest_for_reg(reg));
241 }
242
243 unsigned finished_write_mask = 0;
244
245 /* First, emit a MOV for all the src channels that are in the
246 * destination reg, in case other values we're populating in the dest
247 * might overwrite them.
248 */
249 for (unsigned i = 0; i < 4; i++) {
250 if (!(vec->dest.write_mask & (1 << i)))
251 continue;
252
253 if (src_matches_dest_reg(&vec->dest.dest, &vec->src[i].src)) {
254 finished_write_mask |= insert_mov(vec, i, b->shader);
255 break;
256 }
257 }
258
259 /* Now, emit MOVs for all the other src channels. */
260 for (unsigned i = 0; i < 4; i++) {
261 if (!(vec->dest.write_mask & (1 << i)))
262 continue;
263
264 /* Coalescing moves the register writes from the vec up to the ALU
265 * instruction in the source. We can only do this if the original
266 * vecN had an SSA destination.
267 */
268 if (vec_had_ssa_dest && !(finished_write_mask & (1 << i)))
269 finished_write_mask |= try_coalesce(vec, i);
270
271 if (!(finished_write_mask & (1 << i)))
272 finished_write_mask |= insert_mov(vec, i, b->shader);
273 }
274
275 nir_instr_remove(&vec->instr);
276 ralloc_free(vec);
277
278 return true;
279 }
280
281 bool
282 nir_lower_vec_to_movs(nir_shader *shader)
283 {
284 return nir_shader_instructions_pass(shader,
285 nir_lower_vec_to_movs_instr,
286 nir_metadata_block_index |
287 nir_metadata_dominance,
288 NULL);
289 }