/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */
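/*
 * For example (illustrative NIR-style pseudocode, not actual pass output):
 *
 *    ssa_3 = fmul ssa_1, ssa_2
 *    ssa_4 = fadd ssa_3, ssa_0
 *
 * becomes
 *
 *    ssa_4 = ffma ssa_1, ssa_2, ssa_0
 */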

struct peephole_ffma_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};

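/*
 * Returns true if every use of the given SSA def, looked through any
 * chain of mov/fneg/fabs instructions, is as a source of an fadd.
 */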
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_empty(&def->if_uses))
      return false;

   nir_foreach_use(def, use_src) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_imov:
      case nir_op_fmov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}

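/*
 * Looks through the mov/fneg/fabs chain feeding an ALU source for a fmul
 * that is safe to fuse.  Any negate/abs modifiers encountered along the way
 * are accumulated into *negate and *abs, and the chain's swizzles are
 * composed into swizzle[].  Returns NULL if no fusable fmul is found.
 */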
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, int num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_imov:
   case nir_op_fmov:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], num_components, swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb a fmul into a ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing which can actually lead to more instructions.
       */
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   /* Compose this source's swizzle with the swizzle accumulated so far.
    * Copy into a temporary first: composing in place would read entries
    * of swizzle[] that this loop has already overwritten whenever
    * src->swizzle[i] < i.
    */
   uint8_t swizzle_tmp[4];
   for (unsigned i = 0; i < num_components; i++)
      swizzle_tmp[i] = swizzle[src->swizzle[i]];

   for (unsigned i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[i];

   return alu;
}

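/*
 * Per-block callback: finds fadd instructions fed by a fusable fmul and
 * rewrites the pair into a single ffma, materializing any fneg/fabs
 * modifiers picked up along the way as explicit instructions.
 */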
static bool
nir_opt_peephole_ffma_block(nir_block *block, void *void_state)
{
   struct peephole_ffma_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *add = nir_instr_as_alu(instr);
      if (add->op != nir_op_fadd)
         continue;

      /* TODO: Maybe bail if this expression is considered "precise"? */

      assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

      /* This is the case a + a.  We would rather handle this with an
       * algebraic reduction than fuse it.  Also, we want to only fuse
       * things where the multiply is used only once and, in this case,
       * it would be used twice by the same instruction.
       */
      if (add->src[0].src.ssa == add->src[1].src.ssa)
         continue;

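      /* Try each of the two fadd sources in turn, looking through any
       * mov/fneg/fabs chain for a fmul we can fuse.
       */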
      nir_alu_instr *mul;
      uint8_t add_mul_src, swizzle[4];
      bool negate, abs;
      for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
         for (unsigned i = 0; i < 4; i++)
            swizzle[i] = i;

         negate = false;
         abs = false;

         mul = get_mul_for_src(&add->src[add_mul_src],
                               add->dest.dest.ssa.num_components,
                               swizzle, &negate, &abs);

         if (mul != NULL)
            break;
      }

      if (mul == NULL)
         continue;

      nir_ssa_def *mul_src[2];
      mul_src[0] = mul->src[0].src.ssa;
      mul_src[1] = mul->src[1].src.ssa;

      if (abs) {
         for (unsigned i = 0; i < 2; i++) {
            nir_alu_instr *abs_instr = nir_alu_instr_create(state->mem_ctx,
                                                            nir_op_fabs);
            abs_instr->src[0].src = nir_src_for_ssa(mul_src[i]);
            nir_ssa_dest_init(&abs_instr->instr, &abs_instr->dest.dest,
                              mul_src[i]->num_components, NULL);
            abs_instr->dest.write_mask = (1 << mul_src[i]->num_components) - 1;
            nir_instr_insert_before(&add->instr, &abs_instr->instr);
            mul_src[i] = &abs_instr->dest.dest.ssa;
         }
      }

      if (negate) {
         nir_alu_instr *neg = nir_alu_instr_create(state->mem_ctx,
                                                   nir_op_fneg);
         neg->src[0].src = nir_src_for_ssa(mul_src[0]);
         nir_ssa_dest_init(&neg->instr, &neg->dest.dest,
                           mul_src[0]->num_components, NULL);
         neg->dest.write_mask = (1 << mul_src[0]->num_components) - 1;
         nir_instr_insert_before(&add->instr, &neg->instr);
         mul_src[0] = &neg->dest.dest.ssa;
      }

      nir_alu_instr *ffma = nir_alu_instr_create(state->mem_ctx, nir_op_ffma);
      ffma->dest.saturate = add->dest.saturate;
      ffma->dest.write_mask = add->dest.write_mask;

      /* Apply the accumulated swizzle on top of the fmul's own source
       * swizzles.
       */
      for (unsigned i = 0; i < 2; i++) {
         ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
      nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src],
                       state->mem_ctx);

      assert(add->dest.dest.is_ssa);

      nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                        add->dest.dest.ssa.num_components,
                        add->dest.dest.ssa.name);
      nir_ssa_def_rewrite_uses(&add->dest.dest.ssa,
                               nir_src_for_ssa(&ffma->dest.dest.ssa),
                               state->mem_ctx);

      nir_instr_insert_before(&add->instr, &ffma->instr);
      assert(list_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);

      state->progress = true;
   }

   return true;
}

static bool
nir_opt_peephole_ffma_impl(nir_function_impl *impl)
{
   struct peephole_ffma_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.impl = impl;
   state.progress = false;

   nir_foreach_block(impl, nir_opt_peephole_ffma_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return state.progress;
}

bool
nir_opt_peephole_ffma(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         progress |= nir_opt_peephole_ffma_impl(overload->impl);
   }

   return progress;
}