src/intel/compiler/brw_nir_opt_peephole_ffma.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */

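/*
 * Illustrative example (NIR pseudocode; the value names and sizes are made
 * up, not taken from an actual shader dump):
 *
 *    vec1 32 ssa_2 = fmul ssa_0, ssa_1
 *    vec1 32 ssa_3 = fadd ssa_2, ssa_4
 *
 * becomes, provided ssa_2 has no other users,
 *
 *    vec1 32 ssa_3 = ffma ssa_0, ssa_1, ssa_4
 */

/* Returns true if every use of the given SSA value is (possibly through a
 * chain of imov/fmov/fneg/fabs instructions) an fadd.  Any use as an
 * if-condition disqualifies the value.
 */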
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_empty(&def->if_uses))
      return false;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}

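/* Walks up a chain of imov/fmov/fneg/fabs instructions starting at *src,
 * looking for an fmul to fuse.  fneg/fabs modifiers found along the way
 * accumulate in *negate and *abs, and each level's swizzle is composed
 * into swizzle[].  Returns NULL if the chain does not end in a fusable
 * multiply.
 */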
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, unsigned num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   uint8_t swizzle_tmp[4];
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* We want to bail if any of the other ALU operations involved is labeled
    * exact.  One reason for this is that, while the value that changes is
    * actually the result of the add and not the multiply, a user who marks
    * the multiply as exact wants *that* value preserved and does not care
    * about the add.  Another reason is that SPIR-V explicitly requires this
    * behavior.
    */
   if (alu->exact)
      return NULL;

   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing which can actually lead to more instructions.
       */
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   /* Copy swizzle data before overwriting it to avoid setting a wrong swizzle.
    *
    * Example:
    *   Former swizzle[] = xyzw
    *   src->swizzle[] = zyxx
    *
    *   Expected output swizzle = zyxx
    *   If we reuse swizzle in the loop, then output swizzle would be zyzz.
    */
   memcpy(swizzle_tmp, swizzle, 4*sizeof(uint8_t));
   for (int i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[src->swizzle[i]];

   return alu;
}

/**
 * Given an array of at least two nir_alu_srcs, returns true if any of the
 * first two is a constant (load_const) whose value is used exactly once.
 */
static bool
any_alu_src_is_a_constant(nir_alu_src srcs[])
{
   for (unsigned i = 0; i < 2; i++) {
      if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load_const =
            nir_instr_as_load_const(srcs[i].src.ssa->parent_instr);

         if (list_is_singular(&load_const->def.uses) &&
             list_empty(&load_const->def.if_uses)) {
            return true;
         }
      }
   }

   return false;
}

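/* Scans a block for fadd instructions whose source chain ends in a fusable
 * fmul and rewrites each matching mul+add pair into a single ffma.
 */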
static bool
brw_nir_opt_peephole_ffma_block(nir_builder *b, nir_block *block)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      assert(add->dest.dest.is_ssa);
      if (add->exact)
         continue;

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle it with an
       * algebraic reduction than fuse it.  Also, we only want to fuse
       * when the multiply is used exactly once, and here it would be
       * used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;

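      /* Try each of the fadd's two sources in turn, looking for a chain
       * that ends in a fusable fmul.
       */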
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;
      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src],
                               add->dest.dest.ssa.num_components,
                               swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      unsigned bit_size = add->dest.dest.ssa.bit_size;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

      /* If both the fmul and the fadd have a constant operand, skip the
       * fusion: left unfused, each constant can be propagated into its
       * instruction as an operand, potentially saving two load_const
       * instructions.
       */
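      /* (Presumably this helps because three-source instructions like FFMA
       * generally cannot take immediate operands, while a separate fmul and
       * fadd can each fold one constant in as an immediate.)
       */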
      if (any_alu_src_is_a_constant(mul->src) &&
          any_alu_src_is_a_constant(add->src)) {
         continue;
      }

      b->cursor = nir_before_instr(&add->instr);

      if (abs) {
         for (unsigned i = 0; i < 2; i++)
            mul_src[i] = nir_fabs(b, mul_src[i]);
      }

      if (negate)
         mul_src[0] = nir_fneg(b, mul_src[0]);

      nir_alu_instr *ffma = nir_alu_instr_create(b->shader, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

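      /* Compose the swizzle accumulated from the mov/fneg/fabs chain with
       * each of the fmul's own source swizzles.
       */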
      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        bit_size,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa));

      nir_builder_instr_insert(b, &ffma->instr);
      assert(list_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);

      progress = true;
   }

   return progress;
}

static bool
brw_nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(block, impl) {
      progress |= brw_nir_opt_peephole_ffma_block(&builder, block);
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}

bool
brw_nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= brw_nir_opt_peephole_ffma_impl(function->impl);
   }

   return progress;
}