nir: Add support for CSE on textures.
[mesa.git] / src / glsl / nir / nir_opt_cse.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"

/*
 * Implements common subexpression elimination
 */

struct cse_state {
   void *mem_ctx;
   bool progress;
};

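/* Returns true if the two ALU sources are identical, including the abs and
 * negate modifiers and the swizzle of every component that is actually read.
 */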
static bool
nir_alu_srcs_equal(nir_alu_instr *alu1, nir_alu_instr *alu2, unsigned src1,
                   unsigned src2)
{
   if (alu1->src[src1].abs != alu2->src[src2].abs ||
       alu1->src[src1].negate != alu2->src[src2].negate)
      return false;

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1->src[src1].swizzle[i] != alu2->src[src2].swizzle[i])
         return false;
   }

   return nir_srcs_equal(alu1->src[src1].src, alu2->src[src2].src);
}

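/* Returns true if the two instructions are guaranteed to compute the same
 * value.  Only the instruction types accepted by nir_instr_can_cse() need
 * to be handled here.
 */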
static bool
nir_instrs_equal(nir_instr *instr1, nir_instr *instr2)
{
   if (instr1->type != instr2->type)
      return false;

   switch (instr1->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

      if (alu1->op != alu2->op)
         return false;

      /* TODO: We could probably do something more intelligent here, such as
       * allowing different numbers of components and taking the maximum.
       */
      if (alu1->dest.dest.ssa.num_components != alu2->dest.dest.ssa.num_components)
         return false;

      if (nir_op_infos[alu1->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
         assert(nir_op_infos[alu1->op].num_inputs == 2);
         return (nir_alu_srcs_equal(alu1, alu2, 0, 0) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 1)) ||
                (nir_alu_srcs_equal(alu1, alu2, 0, 1) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 0));
      } else {
         for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
            if (!nir_alu_srcs_equal(alu1, alu2, i, i))
               return false;
         }
      }
      return true;
   }
   case nir_instr_type_tex: {
      nir_tex_instr *tex1 = nir_instr_as_tex(instr1);
      nir_tex_instr *tex2 = nir_instr_as_tex(instr2);

      if (tex1->op != tex2->op)
         return false;

      if (tex1->num_srcs != tex2->num_srcs)
         return false;
      for (unsigned i = 0; i < tex1->num_srcs; i++) {
         if (tex1->src[i].src_type != tex2->src[i].src_type ||
             !nir_srcs_equal(tex1->src[i].src, tex2->src[i].src)) {
            return false;
         }
      }

      if (tex1->coord_components != tex2->coord_components ||
          tex1->sampler_dim != tex2->sampler_dim ||
          tex1->is_array != tex2->is_array ||
          tex1->is_shadow != tex2->is_shadow ||
          tex1->is_new_style_shadow != tex2->is_new_style_shadow ||
          memcmp(tex1->const_offset, tex2->const_offset,
                 sizeof(tex1->const_offset)) != 0 ||
          tex1->component != tex2->component ||
          tex1->sampler_index != tex2->sampler_index ||
          tex1->sampler_array_size != tex2->sampler_array_size) {
         return false;
      }

      /* Don't support un-lowered sampler derefs currently. */
      if (tex1->sampler || tex2->sampler)
         return false;

      return true;
   }
   case nir_instr_type_load_const: {
      nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
      nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);

      if (load1->def.num_components != load2->def.num_components)
         return false;

      return memcmp(load1->value.f, load2->value.f,
                    load1->def.num_components * sizeof(*load2->value.f)) == 0;
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
      nir_phi_instr *phi2 = nir_instr_as_phi(instr2);

      if (phi1->instr.block != phi2->instr.block)
         return false;

      /* Both phis live in the same block, so they have the same set of
       * predecessors; every source of phi1 has exactly one counterpart in
       * phi2 with the same predecessor.
       */
      nir_foreach_phi_src(phi1, src1) {
         nir_foreach_phi_src(phi2, src2) {
            if (src1->pred == src2->pred) {
               if (!nir_srcs_equal(src1->src, src2->src))
                  return false;

               break;
            }
         }
      }

      return true;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrinsic1 = nir_instr_as_intrinsic(instr1);
      nir_intrinsic_instr *intrinsic2 = nir_instr_as_intrinsic(instr2);
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[intrinsic1->intrinsic];

      if (intrinsic1->intrinsic != intrinsic2->intrinsic ||
          intrinsic1->num_components != intrinsic2->num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.num_components !=
                            intrinsic2->dest.ssa.num_components)
         return false;

      for (unsigned i = 0; i < info->num_srcs; i++) {
         if (!nir_srcs_equal(intrinsic1->src[i], intrinsic2->src[i]))
            return false;
      }

      assert(info->num_variables == 0);

      for (unsigned i = 0; i < info->num_indices; i++) {
         if (intrinsic1->const_index[i] != intrinsic2->const_index[i])
            return false;
      }

      return true;
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}

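/* Callbacks for nir_foreach_src()/nir_foreach_dest(); CSE only operates on
 * pure SSA values, so any register access disqualifies an instruction.
 */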
static bool
src_is_ssa(nir_src *src, void *data)
{
   (void) data;
   return src->is_ssa;
}

static bool
dest_is_ssa(nir_dest *dest, void *data)
{
   (void) data;
   return dest->is_ssa;
}

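/* Returns true if the given instruction is a candidate for CSE: it must be
 * in SSA form and must be safe both to eliminate and to reorder.
 */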
static bool
nir_instr_can_cse(nir_instr *instr)
{
   /* We only handle SSA. */
   if (!nir_foreach_dest(instr, dest_is_ssa, NULL) ||
       !nir_foreach_src(instr, src_is_ssa, NULL))
      return false;

   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_load_const:
   case nir_instr_type_phi:
      return true;
   case nir_instr_type_intrinsic: {
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];
      return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER) &&
             info->num_variables == 0; /* not implemented yet */
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return false;
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}

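/* Returns the SSA definition written by the instruction.  Only valid for
 * the instruction types accepted by nir_instr_can_cse().
 */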
static nir_ssa_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;
   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;
   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;
   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;
   case nir_instr_type_intrinsic:
      assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
      return &nir_instr_as_intrinsic(instr)->dest.ssa;
   default:
      unreachable("We never ask for any of these");
   }
}

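/* Attempts CSE on a single instruction: searches backwards through the
 * preceding instructions in its block and then through every block that
 * dominates it.  On finding an equivalent instruction, all uses of this
 * instruction's destination are rewritten to the earlier definition and
 * the instruction is removed.
 */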
static void
nir_opt_cse_instr(nir_instr *instr, struct cse_state *state)
{
   if (!nir_instr_can_cse(instr))
      return;

   for (struct exec_node *node = instr->node.prev;
        !exec_node_is_head_sentinel(node); node = node->prev) {
      nir_instr *other = exec_node_data(nir_instr, node, node);
      if (nir_instrs_equal(instr, other)) {
         nir_ssa_def *other_def = nir_instr_get_dest_ssa_def(other);
         nir_ssa_def_rewrite_uses(nir_instr_get_dest_ssa_def(instr),
                                  nir_src_for_ssa(other_def),
                                  state->mem_ctx);
         nir_instr_remove(instr);
         state->progress = true;
         return;
      }
   }

   for (nir_block *block = instr->block->imm_dom;
        block != NULL; block = block->imm_dom) {
      nir_foreach_instr_reverse(block, other) {
         if (nir_instrs_equal(instr, other)) {
            nir_ssa_def *other_def = nir_instr_get_dest_ssa_def(other);
            nir_ssa_def_rewrite_uses(nir_instr_get_dest_ssa_def(instr),
                                     nir_src_for_ssa(other_def),
                                     state->mem_ctx);
            nir_instr_remove(instr);
            state->progress = true;
            return;
         }
      }
   }
}

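/* Block callback for nir_foreach_block(); the safe iterator is required
 * because nir_opt_cse_instr() may remove the current instruction.
 */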
static bool
nir_opt_cse_block(nir_block *block, void *void_state)
{
   struct cse_state *state = void_state;

   nir_foreach_instr_safe(block, instr)
      nir_opt_cse_instr(instr, state);

   return true;
}

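/* The search walks the dominator tree, so dominance information must be up
 * to date.  Removing instructions changes neither block indices nor the
 * dominance tree, so both can be preserved.
 */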
static bool
nir_opt_cse_impl(nir_function_impl *impl)
{
   struct cse_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.progress = false;

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(impl, nir_opt_cse_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return state.progress;
}

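/* Pass entry point: runs CSE on every function implementation in the shader
 * and returns whether any instructions were eliminated.
 *
 * A typical use is inside a driver's optimization loop, repeated until no
 * pass makes further progress (a sketch; the surrounding loop and the choice
 * of companion passes are up to the caller):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_cse(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */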
bool
nir_opt_cse(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         progress |= nir_opt_cse_impl(overload->impl);
   }

   return progress;
}