/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

/* Optional filter callback plus user data, passed through to
 * lower_alu_instr_scalar(): when the callback is set, only instructions for
 * which it returns true are lowered.
 */
struct alu_to_scalar_data {
   nir_instr_filter_cb cb;
   const void *data;
};

/** @file nir_lower_alu_to_scalar.c
 *
 * Replaces nir_alu_instr operations that use more than one channel in their
 * arguments with individual per-channel operations.
 */
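
/* For example, a four-wide add such as
 *
 *    vec4 32 ssa_2 = fadd ssa_0, ssa_1
 *
 * ends up (roughly) as four scalar adds recombined with a vec4:
 *
 *    vec1 32 ssa_3 = fadd ssa_0.x, ssa_1.x
 *    vec1 32 ssa_4 = fadd ssa_0.y, ssa_1.y
 *    vec1 32 ssa_5 = fadd ssa_0.z, ssa_1.z
 *    vec1 32 ssa_6 = fadd ssa_0.w, ssa_1.w
 *    vec4 32 ssa_7 = vec4 ssa_3, ssa_4, ssa_5, ssa_6
 */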

static bool
inst_is_vector_alu(const nir_instr *instr, const void *_state)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* There is no ALU instruction which has a scalar destination, scalar
    * src[0], and some other vector source.  The input_sizes[0] check is what
    * catches the reduction opcodes (fdot, ball_iequal, etc.), which have a
    * scalar destination but vector sources.
    */
   assert(alu->dest.dest.is_ssa);
   assert(alu->src[0].src.is_ssa);
   return alu->dest.dest.ssa.num_components > 1 ||
          nir_op_infos[alu->op].input_sizes[0] > 1;
}

static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                      unsigned bit_size)
{
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                     bit_size, NULL);
   alu->dest.write_mask = (1 << num_components) - 1;
}

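/* Lowers a reduction (anything with vector sources and a scalar destination)
 * to a chain of scalar chan_ops folded together with merge_op.  For an fdot3
 * (chan_op = fmul, merge_op = fadd), this emits, roughly:
 *
 *    t0 = fmul src0.x, src1.x
 *    t1 = fmul src0.y, src1.y
 *    t2 = fadd t0, t1
 *    t3 = fmul src0.z, src1.z
 *    t4 = fadd t2, t3   <- returned result
 */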
static nir_ssa_def *
lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                nir_builder *builder)
{
   unsigned num_components = nir_op_infos[alu->op].input_sizes[0];

   nir_ssa_def *last = NULL;
   for (unsigned i = 0; i < num_components; i++) {
      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
      nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
      nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }
      chan->exact = alu->exact;

      nir_builder_instr_insert(builder, &chan->instr);

      if (i == 0) {
         last = &chan->dest.dest.ssa;
      } else {
         last = nir_build_alu(builder, merge_op,
                              last, &chan->dest.dest.ssa, NULL, NULL);
      }
   }

   return last;
}

static nir_ssa_def *
lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data)
{
   struct alu_to_scalar_data *data = _data;
   nir_alu_instr *alu = nir_instr_as_alu(instr);
   unsigned num_src = nir_op_infos[alu->op].num_inputs;
   unsigned i, chan;

   assert(alu->dest.dest.is_ssa);
   assert(alu->dest.write_mask != 0);

   b->cursor = nir_before_instr(&alu->instr);
   b->exact = alu->exact;

   if (data->cb && !data->cb(instr, data->data))
      return NULL;

#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
      return lower_reduction(alu, chan, merge, b);

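   /* Each LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd) below, for
    * example, expands to case labels for nir_op_fdot2/3/4 that hand the
    * instruction to lower_reduction() with fmul as the per-channel op and
    * fadd as the merge op.
    */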
   switch (alu->op) {
   case nir_op_vec16:
   case nir_op_vec8:
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      /* We don't need to scalarize these ops, they're the ones generated to
       * group up outputs into a value that can be SSAed.
       */
      return NULL;

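   /* pack_half_2x16 packs two 16-bit floats into one 32-bit value.  If the
    * backend asked for the lowering, rewrite it in terms of the two-scalar
    * pack_half_2x16_split form.
    */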
   case nir_op_pack_half_2x16: {
      if (!b->shader->options->lower_pack_half_2x16)
         return NULL;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
      return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
                                      nir_channel(b, src_vec2, 1));
   }

   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_snorm_2x16:
      /* There is no scalar version of these ops, unless we were to break
       * them down to bitshifts and math (which is definitely not intended).
       */
      return NULL;

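   /* unpack_half_2x16 does have scalar split forms, so lower to those when
    * the backend requests it, preserving the flush-to-zero behavior of the
    * FTZ variant.
    */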
   case nir_op_unpack_half_2x16_flush_to_zero:
   case nir_op_unpack_half_2x16: {
      if (!b->shader->options->lower_unpack_half_2x16)
         return NULL;

      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
      if (alu->op == nir_op_unpack_half_2x16_flush_to_zero) {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x_flush_to_zero(b,
                                                                    packed),
                         nir_unpack_half_2x16_split_y_flush_to_zero(b,
                                                                    packed));
      } else {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x(b, packed),
                         nir_unpack_half_2x16_split_y(b, packed));
      }
   }

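   /* pack_uvec2_to_uint packs the low 16 bits of each of the two source
    * channels into one 32-bit value:
    * (src.y & 0xffff) << 16 | (src.x & 0xffff).
    */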
   case nir_op_pack_uvec2_to_uint: {
      assert(b->shader->options->lower_pack_snorm_2x16 ||
             b->shader->options->lower_pack_unorm_2x16);

      nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                          nir_imm_int(b, 0));
      return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
                                 nir_imm_int(b, 16)),
                     nir_channel(b, word, 0));
   }

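   /* Same idea with bytes: keep the low 8 bits of each of the four channels
    * and pack them as (src.w << 24) | (src.z << 16) | (src.y << 8) | src.x.
    */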
   case nir_op_pack_uvec4_to_uint: {
      assert(b->shader->options->lower_pack_snorm_4x8 ||
             b->shader->options->lower_pack_unorm_4x8);

      nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                         nir_imm_int(b, 0));
      return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
                                            nir_imm_int(b, 24)),
                                 nir_ishl(b, nir_channel(b, byte, 2),
                                          nir_imm_int(b, 16))),
                     nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
                                         nir_imm_int(b, 8)),
                             nir_channel(b, byte, 0)));
   }

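   /* fdph(a, b) is the homogeneous dot product dot(a.xyz, b.xyz) + b.w,
    * i.e. a is treated as having an implicit 1.0 in its fourth component.
    */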
   case nir_op_fdph: {
      nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
      nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);

      nir_ssa_def *sum[4];
      for (unsigned i = 0; i < 3; i++) {
         sum[i] = nir_fmul(b, nir_channel(b, src0_vec, i),
                           nir_channel(b, src1_vec, i));
      }
      sum[3] = nir_channel(b, src1_vec, 3);

      return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
                      nir_fadd(b, sum[2], sum[3]));
   }

   case nir_op_unpack_64_2x32:
   case nir_op_unpack_64_4x16:
   case nir_op_unpack_32_2x16:
      return NULL;

   LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
   LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
   LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
   LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior);
   LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
   LOWER_REDUCTION(nir_op_b8all_fequal, nir_op_feq8, nir_op_iand);
   LOWER_REDUCTION(nir_op_b8all_iequal, nir_op_ieq8, nir_op_iand);
   LOWER_REDUCTION(nir_op_b8any_fnequal, nir_op_fne8, nir_op_ior);
   LOWER_REDUCTION(nir_op_b8any_inequal, nir_op_ine8, nir_op_ior);
   LOWER_REDUCTION(nir_op_b16all_fequal, nir_op_feq16, nir_op_iand);
   LOWER_REDUCTION(nir_op_b16all_iequal, nir_op_ieq16, nir_op_iand);
   LOWER_REDUCTION(nir_op_b16any_fnequal, nir_op_fne16, nir_op_ior);
   LOWER_REDUCTION(nir_op_b16any_inequal, nir_op_ine16, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fne32, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32any_inequal, nir_op_ine32, nir_op_ior);
   LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fmin);
   LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_fmax);

   default:
      break;
   }

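   /* Any other op with a vector destination is lowered generically: emit one
    * scalar copy of the instruction per enabled write-mask channel, then
    * recombine the results with a vec.
    */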
   if (alu->dest.dest.ssa.num_components == 1)
      return NULL;

   unsigned num_components = alu->dest.dest.ssa.num_components;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };

   for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
      if (!(alu->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[alu->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[alu->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
         for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
            lower->src[i].swizzle[j] = alu->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1, alu->dest.dest.ssa.bit_size);
      lower->dest.saturate = alu->dest.saturate;
      comps[chan] = &lower->dest.dest.ssa;
      lower->exact = alu->exact;

      nir_builder_instr_insert(b, &lower->instr);
   }

   return nir_vec(b, comps, num_components);
}

bool
nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *_data)
{
   struct alu_to_scalar_data data = {
      .cb = cb,
      .data = _data,
   };

   return nir_shader_lower_instructions(shader,
                                        inst_is_vector_alu,
                                        lower_alu_instr_scalar,
                                        &data);
}
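
/* A sketch of a typical call site (hypothetical driver code, not part of
 * this file): passing a NULL filter lowers every vector ALU instruction,
 * while a driver that supports some vector ops natively can return false
 * from the callback for the ops it wants to keep:
 *
 *    static bool
 *    scalarize_filter(const nir_instr *instr, const void *data)
 *    {
 *       return nir_instr_as_alu(instr)->op != nir_op_fmul;
 *    }
 *
 *    nir_lower_alu_to_scalar(shader, scalarize_filter, NULL);
 *
 * The callback returns true for instructions that should be lowered, and it
 * is only ever called on instructions that already passed
 * inst_is_vector_alu().
 */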