nir/alu_to_scalar: Use "alu" as the name for the nir_alu_instr
[mesa.git] / src/compiler/nir/nir_lower_alu_to_scalar.c
/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/** @file nir_lower_alu_to_scalar.c
 *
 * Replaces nir_alu_instr operations that use more than one channel of their
 * arguments with equivalent sequences of per-channel (scalar) operations.
 */
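
/* For example, a four-wide fadd such as
 *
 *    vec4 32 ssa_2 = fadd ssa_0, ssa_1
 *
 * becomes (roughly) four scalar fadds whose results are regrouped with a
 * vec4:
 *
 *    vec1 32 ssa_3 = fadd ssa_0.x, ssa_1.x
 *    ...
 *    vec4 32 ssa_7 = vec4 ssa_3, ssa_4, ssa_5, ssa_6
 */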

static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                      unsigned bit_size)
{
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                     bit_size, NULL);
   alu->dest.write_mask = (1 << num_components) - 1;
}

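/* Lowers a reduction opcode (one that collapses an N-component source to a
 * scalar, such as fdot4) into per-channel chan_op instructions whose results
 * are combined with merge_op.  For example, fdot2(a, b) becomes
 * fadd(fmul(a.x, b.x), fmul(a.y, b.y)).
 */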
static void
lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                nir_builder *builder)
{
   unsigned num_components = nir_op_infos[alu->op].input_sizes[0];

   nir_ssa_def *last = NULL;
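   /* Build one scalar chan_op per source channel, adjusting the swizzles so
    * that iteration i reads channel i of the original sources.
    */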
   for (unsigned i = 0; i < num_components; i++) {
      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
      nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
      nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }
      chan->exact = alu->exact;

      nir_builder_instr_insert(builder, &chan->instr);

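      /* Fold the new channel result into the running reduction value. */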
      if (i == 0) {
         last = &chan->dest.dest.ssa;
      } else {
         last = nir_build_alu(builder, merge_op,
                              last, &chan->dest.dest.ssa, NULL, NULL);
      }
   }

   assert(alu->dest.write_mask == 1);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(last));
   nir_instr_remove(&alu->instr);
}

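/* Lowers a single ALU instruction to scalar operations where needed.
 * Returns true if the instruction was replaced (i.e. progress was made).
 */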
static bool
lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_set)
{
   unsigned num_src = nir_op_infos[alu->op].num_inputs;
   unsigned i, chan;

   assert(alu->dest.dest.is_ssa);
   assert(alu->dest.write_mask != 0);

   b->cursor = nir_before_instr(&alu->instr);
   b->exact = alu->exact;

   if (lower_set && !BITSET_TEST(lower_set, alu->op))
      return false;

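/* Expands to case labels for the 2-, 3- and 4-component variants of a
 * reduction opcode (e.g. nir_op_fdot2/3/4), all dispatched to
 * lower_reduction with the given per-channel and merge opcodes.
 */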
#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
      lower_reduction(alu, chan, merge, b); \
      return true;

   switch (alu->op) {
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      /* We don't need to scalarize these ops; they're the ones generated to
       * group per-channel outputs back up into a value that can be SSA'd.
       */
      return false;

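   /* pack_half_2x16 packs two 32-bit floats into two half floats in one
    * 32-bit word.  When the backend asks for it, lower it to the scalar
    * _split opcode, which takes the two channels as separate sources.
    */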
   case nir_op_pack_half_2x16: {
      if (!b->shader->options->lower_pack_half_2x16)
         return false;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);

      nir_ssa_def *val =
         nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
                                     nir_channel(b, src_vec2, 1));

      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&alu->instr);
      return true;
   }

   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_snorm_2x16:
      /* There is no scalar version of these ops, unless we were to break
       * them down into bitshifts and math (which is definitely not intended).
       */
      return false;

   case nir_op_unpack_half_2x16: {
      if (!b->shader->options->lower_unpack_half_2x16)
         return false;

      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);

      nir_ssa_def *comps[2];
      comps[0] = nir_unpack_half_2x16_split_x(b, packed);
      comps[1] = nir_unpack_half_2x16_split_y(b, packed);
      nir_ssa_def *vec = nir_vec(b, comps, 2);

      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
      nir_instr_remove(&alu->instr);
      return true;
   }

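   /* Packs the low 16 bits of each source channel into a single 32-bit word:
    * val = ((src.y & 0xffff) << 16) | (src.x & 0xffff).
    */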
   case nir_op_pack_uvec2_to_uint: {
      assert(b->shader->options->lower_pack_snorm_2x16 ||
             b->shader->options->lower_pack_unorm_2x16);

      nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                          nir_imm_int(b, 0));
      nir_ssa_def *val =
         nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
                    nir_channel(b, word, 0));

      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&alu->instr);
      return true;
   }

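   /* Same idea for four bytes: each channel is first masked to its low 8
    * bits by extract_u8, then
    * val = (src.w << 24) | (src.z << 16) | (src.y << 8) | src.x.
    */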
   case nir_op_pack_uvec4_to_uint: {
      assert(b->shader->options->lower_pack_snorm_4x8 ||
             b->shader->options->lower_pack_unorm_4x8);

      nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                         nir_imm_int(b, 0));
      nir_ssa_def *val =
         nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
                               nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
                    nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
                               nir_channel(b, byte, 0)));

      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&alu->instr);
      return true;
   }

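   /* Homogeneous dot product:
    * fdph(a, b) = a.x * b.x + a.y * b.y + a.z * b.z + b.w.
    */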
   case nir_op_fdph: {
      nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
      nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);

      nir_ssa_def *sum[4];
      for (unsigned i = 0; i < 3; i++) {
         sum[i] = nir_fmul(b, nir_channel(b, src0_vec, i),
                              nir_channel(b, src1_vec, i));
      }
      sum[3] = nir_channel(b, src1_vec, 3);

      nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
                                     nir_fadd(b, sum[2], sum[3]));

      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&alu->instr);
      return true;
   }

   case nir_op_unpack_64_2x32:
   case nir_op_unpack_32_2x16:
      return false;

   LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
   LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
   LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
   LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
   LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fne32, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32any_inequal, nir_op_ine32, nir_op_ior);
   LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fmin);
   LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_fmax);

   default:
      break;
   }

   if (alu->dest.dest.ssa.num_components == 1)
      return false;

   unsigned num_components = alu->dest.dest.ssa.num_components;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };

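   /* Generic case: emit one scalar copy of the instruction per enabled
    * destination channel and regroup the results with a vec.
    */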
   for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
      if (!(alu->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[alu->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[alu->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
         for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
            lower->src[i].swizzle[j] = alu->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1, alu->dest.dest.ssa.bit_size);
      lower->dest.saturate = alu->dest.saturate;
      comps[chan] = &lower->dest.dest.ssa;
      lower->exact = alu->exact;

      nir_builder_instr_insert(b, &lower->instr);
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);

   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));

   nir_instr_remove(&alu->instr);
   return true;
}

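/* Applies the scalarizing lowering to every ALU instruction in the given
 * function implementation and reports whether anything changed.
 */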
static bool
nir_lower_alu_to_scalar_impl(nir_function_impl *impl, BITSET_WORD *lower_set)
{
   nir_builder builder;
   nir_builder_init(&builder, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type == nir_instr_type_alu) {
            progress = lower_alu_instr_scalar(nir_instr_as_alu(instr),
                                              &builder,
                                              lower_set) || progress;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

bool
nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = nir_lower_alu_to_scalar_impl(function->impl,
                                                 lower_set) || progress;
   }

   return progress;
}
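
/* A minimal usage sketch (illustrative; not part of this file): drivers
 * typically run the pass from their optimization loop and use the returned
 * progress flag to iterate to a fixed point, e.g.:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_lower_alu_to_scalar(shader, NULL);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 *
 * Passing NULL as lower_set scalarizes every op this pass handles; passing
 * a bitset restricts lowering to the opcodes set in it.
 */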