src/compiler/nir/nir_opt_vectorize.c
/*
 * Copyright © 2015 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

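/*
 * nir_opt_vectorize: combines equivalent scalar (or partially vectorized)
 * ALU instructions that operate on different channels of the same sources
 * into a single wider ALU instruction, then rewrites users of the originals
 * to swizzle the appropriate channels out of the combined result.
 */
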
#include "nir.h"
#include "nir_vla.h"
#include "nir_builder.h"
#include "util/u_dynarray.h"
#include "util/xxhash.h" /* for XXH32(); may already be pulled in via nir.h */

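/* Mix `data` into a running XXH32 hash, using the previous hash as the seed. */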
#define HASH(hash, data) XXH32(&data, sizeof(data), hash)

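/*
 * All constant sources hash identically (as NULL) so that two instructions
 * reading different constants still land in the same bucket; the constants
 * themselves are merged into one vector immediate in instr_try_combine().
 */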
static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
   assert(src->is_ssa);
   void *hash_data = nir_src_is_const(*src) ? NULL : src->ssa;

   return HASH(hash, hash_data);
}

static uint32_t
hash_alu_src(uint32_t hash, const nir_alu_src *src)
{
   assert(!src->abs && !src->negate);

   /* intentionally don't hash swizzle */

   return hash_src(hash, &src->src);
}

static uint32_t
hash_alu(uint32_t hash, const nir_alu_instr *instr)
{
   hash = HASH(hash, instr->op);

   hash = HASH(hash, instr->dest.dest.ssa.bit_size);

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      hash = hash_alu_src(hash, &instr->src[i]);

   return hash;
}

static uint32_t
hash_instr(const nir_instr *instr)
{
   uint32_t hash = 0;

   switch (instr->type) {
   case nir_instr_type_alu:
      return hash_alu(hash, nir_instr_as_alu(instr));
   default:
      unreachable("bad instruction type");
   }
}

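/*
 * Two sources match if they read the same SSA def (swizzles aside) or if
 * both are constants, in which case instr_try_combine() builds a combined
 * vector immediate.
 */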
static bool
srcs_equal(const nir_src *src1, const nir_src *src2)
{
   assert(src1->is_ssa);
   assert(src2->is_ssa);

   return src1->ssa == src2->ssa ||
          (nir_src_is_const(*src1) && nir_src_is_const(*src2));
}

static bool
alu_srcs_equal(const nir_alu_src *src1, const nir_alu_src *src2)
{
   assert(!src1->abs);
   assert(!src1->negate);
   assert(!src2->abs);
   assert(!src2->negate);

   return srcs_equal(&src1->src, &src2->src);
}

static bool
instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
{
   switch (instr1->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

      if (alu1->op != alu2->op)
         return false;

      if (alu1->dest.dest.ssa.bit_size != alu2->dest.dest.ssa.bit_size)
         return false;

      for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
         if (!alu_srcs_equal(&alu1->src[i], &alu2->src[i]))
            return false;
      }

      return true;
   }

   default:
      unreachable("bad instruction type");
   }
}

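/*
 * Only per-component ALU instructions qualify: ops with a fixed output or
 * input size (e.g. dot products) can't simply grow more channels.
 */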
static bool
instr_can_rewrite(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Don't try to vectorize movs. Either they'll be handled by copy
       * prop, or they're actually necessary, and trying to vectorize them
       * would result in fighting with copy prop.
       */
      if (alu->op == nir_op_mov)
         return false;

      if (nir_op_infos[alu->op].output_size != 0)
         return false;

      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (nir_op_infos[alu->op].input_sizes[i] != 0)
            return false;
      }

      return true;
   }

   /* TODO support phi nodes */
   default:
      break;
   }

   return false;
}

/*
 * Tries to combine two instructions whose corresponding sources are
 * different components of the same SSA defs (or are both constants) into
 * one vectorized instruction. Note that instr1 must dominate instr2.
 */
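/*
 * For example (a sketch, not literal NIR output), combining
 *
 *    ssa_3 = fadd ssa_1.x, ssa_2.x
 *    ssa_4 = fadd ssa_1.y, ssa_2.y
 *
 * yields
 *
 *    ssa_5 = fadd ssa_1.xy, ssa_2.xy
 *
 * plus swizzle defs ssa_5.x and ssa_5.y that replace the remaining non-ALU
 * uses of ssa_3 and ssa_4; ALU users are rewritten to read ssa_5 directly
 * with adjusted swizzles.
 */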

static nir_instr *
instr_try_combine(struct nir_shader *nir, nir_instr *instr1, nir_instr *instr2)
{
   assert(instr1->type == nir_instr_type_alu);
   assert(instr2->type == nir_instr_type_alu);
   nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
   nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

   assert(alu1->dest.dest.ssa.bit_size == alu2->dest.dest.ssa.bit_size);
   unsigned alu1_components = alu1->dest.dest.ssa.num_components;
   unsigned alu2_components = alu2->dest.dest.ssa.num_components;
   unsigned total_components = alu1_components + alu2_components;

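   /* The combined destination must still fit in a vec4. */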
   if (total_components > 4)
      return NULL;

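   /* Backends that set vectorize_vec2_16bit only want vec2s of 16-bit values. */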
   if (nir->options->vectorize_vec2_16bit &&
       (total_components > 2 || alu1->dest.dest.ssa.bit_size != 16))
      return NULL;

   nir_builder b;
   nir_builder_init(&b, nir_cf_node_get_function(&instr1->block->cf_node));
   b.cursor = nir_after_instr(instr1);

   nir_alu_instr *new_alu = nir_alu_instr_create(b.shader, alu1->op);
   nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest,
                     total_components, alu1->dest.dest.ssa.bit_size, NULL);
   new_alu->dest.write_mask = (1 << total_components) - 1;

   for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
      /* Handle the constant-merging case: if the two SSA defs differ,
       * srcs_equal() guaranteed that both sources are constants, so gather
       * the swizzled channels of each into one combined vector immediate.
       */
      if (alu1->src[i].src.ssa != alu2->src[i].src.ssa) {
         nir_const_value *c1 = nir_src_as_const_value(alu1->src[i].src);
         nir_const_value *c2 = nir_src_as_const_value(alu2->src[i].src);
         assert(c1 && c2);
         nir_const_value value[4];
         unsigned bit_size = alu1->src[i].src.ssa->bit_size;

         for (unsigned j = 0; j < total_components; j++) {
            value[j].u64 = j < alu1_components ?
                           c1[alu1->src[i].swizzle[j]].u64 :
                           c2[alu2->src[i].swizzle[j - alu1_components]].u64;
         }
         nir_ssa_def *def = nir_build_imm(&b, total_components, bit_size, value);

         new_alu->src[i].src = nir_src_for_ssa(def);
         for (unsigned j = 0; j < total_components; j++)
            new_alu->src[i].swizzle[j] = j;
         continue;
      }

      new_alu->src[i].src = alu1->src[i].src;

      for (unsigned j = 0; j < alu1_components; j++)
         new_alu->src[i].swizzle[j] = alu1->src[i].swizzle[j];

      for (unsigned j = 0; j < alu2_components; j++) {
         new_alu->src[i].swizzle[j + alu1_components] =
            alu2->src[i].swizzle[j];
      }
   }

   nir_builder_instr_insert(&b, &new_alu->instr);

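   /* Carve the combined def into two views: new_alu1 selects alu1's channels
    * (the low lanes) and new_alu2 selects alu2's (the high lanes), for users
    * that can't simply read new_alu directly.
    */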
   unsigned swiz[4] = {0, 1, 2, 3};
   nir_ssa_def *new_alu1 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
                                       alu1_components);

   for (unsigned i = 0; i < alu2_components; i++)
      swiz[i] += alu1_components;
   nir_ssa_def *new_alu2 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
                                       alu2_components);

   nir_foreach_use_safe(src, &alu1->dest.dest.ssa) {
      if (src->parent_instr->type == nir_instr_type_alu) {
         /* For ALU instructions, rewrite the source directly to avoid a
          * round-trip through copy propagation.
          */

         nir_instr_rewrite_src(src->parent_instr, src,
                               nir_src_for_ssa(&new_alu->dest.dest.ssa));
      } else {
         nir_instr_rewrite_src(src->parent_instr, src,
                               nir_src_for_ssa(new_alu1));
      }
   }

   nir_foreach_if_use_safe(src, &alu1->dest.dest.ssa) {
      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu1));
   }

   assert(list_is_empty(&alu1->dest.dest.ssa.uses));
   assert(list_is_empty(&alu1->dest.dest.ssa.if_uses));

   nir_foreach_use_safe(src, &alu2->dest.dest.ssa) {
      if (src->parent_instr->type == nir_instr_type_alu) {
         /* For ALU instructions, rewrite the source directly to avoid a
          * round-trip through copy propagation.
          */

         nir_alu_instr *use = nir_instr_as_alu(src->parent_instr);

         unsigned src_index = 5;
         for (unsigned i = 0; i < nir_op_infos[use->op].num_inputs; i++) {
            if (&use->src[i].src == src) {
               src_index = i;
               break;
            }
         }
         assert(src_index != 5);

         nir_instr_rewrite_src(src->parent_instr, src,
                               nir_src_for_ssa(&new_alu->dest.dest.ssa));

         for (unsigned i = 0;
              i < nir_ssa_alu_instr_src_components(use, src_index); i++) {
            use->src[src_index].swizzle[i] += alu1_components;
         }
      } else {
         nir_instr_rewrite_src(src->parent_instr, src,
                               nir_src_for_ssa(new_alu2));
      }
   }

   nir_foreach_if_use_safe(src, &alu2->dest.dest.ssa) {
      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu2));
   }

   assert(list_is_empty(&alu2->dest.dest.ssa.uses));
   assert(list_is_empty(&alu2->dest.dest.ssa.if_uses));

   nir_instr_remove(instr1);
   nir_instr_remove(instr2);

   return &new_alu->instr;
}

/*
 * Use an array to represent a stack of instructions that are equivalent.
 *
 * We push and pop instructions off the stack in dominance order. The first
 * element dominates the second element, which dominates the third, etc. When
 * trying to add to the stack, first we try to combine the instruction with
 * each of the instructions on the stack and, if successful, replace the
 * instruction on the stack with the newly-combined instruction.
 */

static struct util_dynarray *
vec_instr_stack_create(void *mem_ctx)
{
   struct util_dynarray *stack = ralloc(mem_ctx, struct util_dynarray);
   util_dynarray_init(stack, mem_ctx);
   return stack;
}

/* returns true if we were able to successfully replace the instruction */

static bool
vec_instr_stack_push(struct nir_shader *nir, struct util_dynarray *stack,
                     nir_instr *instr)
{
   /* Walk the stack from child to parent to make live ranges shorter by
    * matching the closest thing we can.
    */
   util_dynarray_foreach_reverse(stack, nir_instr *, stack_instr) {
      nir_instr *new_instr = instr_try_combine(nir, *stack_instr, instr);
      if (new_instr) {
         *stack_instr = new_instr;
         return true;
      }
   }

   util_dynarray_append(stack, nir_instr *, instr);
   return false;
}

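/* Pop instr off its stack; it must be the most recently pushed entry. */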
static void
vec_instr_stack_pop(struct util_dynarray *stack, nir_instr *instr)
{
   ASSERTED nir_instr *last = util_dynarray_pop(stack, nir_instr *);
   assert(last == instr);
}

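/*
 * Stacks are hashed and compared via their bottom (first-pushed)
 * instruction; since every entry on a stack is equivalent, any
 * representative will do.
 */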
static bool
cmp_func(const void *data1, const void *data2)
{
   const struct util_dynarray *arr1 = data1;
   const struct util_dynarray *arr2 = data2;

   const nir_instr *instr1 = *(nir_instr **)util_dynarray_begin(arr1);
   const nir_instr *instr2 = *(nir_instr **)util_dynarray_begin(arr2);

   return instrs_equal(instr1, instr2);
}

static uint32_t
hash_stack(const void *data)
{
   const struct util_dynarray *stack = data;
   const nir_instr *first = *(nir_instr **)util_dynarray_begin(stack);
   return hash_instr(first);
}

static struct set *
vec_instr_set_create(void)
{
   return _mesa_set_create(NULL, hash_stack, cmp_func);
}

static void
vec_instr_set_destroy(struct set *instr_set)
{
   _mesa_set_destroy(instr_set, NULL);
}

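/*
 * Probe the set with a fresh single-entry stack. On a hit, the temporary
 * stack is freed and instr is pushed onto (and possibly combined into) the
 * existing stack; otherwise the new stack becomes the set entry. Returns
 * true if instr was combined into a vectorized instruction.
 */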
static bool
vec_instr_set_add_or_rewrite(struct nir_shader *nir, struct set *instr_set,
                             nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return false;

   struct util_dynarray *new_stack = vec_instr_stack_create(instr_set);
   vec_instr_stack_push(nir, new_stack, instr);

   struct set_entry *entry = _mesa_set_search(instr_set, new_stack);

   if (entry) {
      ralloc_free(new_stack);
      struct util_dynarray *stack = (struct util_dynarray *) entry->key;
      return vec_instr_stack_push(nir, stack, instr);
   }

   _mesa_set_add(instr_set, new_stack);
   return false;
}

static void
vec_instr_set_remove(struct nir_shader *nir, struct set *instr_set,
                     nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return;

   /*
    * It's pretty unfortunate that we have to do this, but it's a side effect
    * of the hash set interfaces. The hash set assumes that we're only
    * interested in storing one equivalent element at a time, and if we try to
    * insert a duplicate element it will remove the original. We could hack up
    * the comparison function to "know" which input is an instruction we
    * passed in and which is an array that's part of the entry, but that
    * wouldn't work because we need to pass an array to _mesa_set_add() in
    * vec_instr_set_add_or_rewrite() above, and _mesa_set_add() will call our
    * comparison function as well.
    */
   struct util_dynarray *temp = vec_instr_stack_create(instr_set);
   vec_instr_stack_push(nir, temp, instr);
   struct set_entry *entry = _mesa_set_search(instr_set, temp);
   ralloc_free(temp);

   if (entry) {
      struct util_dynarray *stack = (struct util_dynarray *) entry->key;

      if (util_dynarray_num_elements(stack, nir_instr *) > 1)
         vec_instr_stack_pop(stack, instr);
      else
         _mesa_set_remove(instr_set, entry);
   }
}

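/*
 * Depth-first walk of the dominance tree: instructions are pushed on the
 * way down and popped (in reverse) on the way back up, so an instruction is
 * only ever combined with one that dominates it.
 */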
static bool
vectorize_block(struct nir_shader *nir, nir_block *block,
                struct set *instr_set)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (vec_instr_set_add_or_rewrite(nir, instr_set, instr))
         progress = true;
   }

   for (unsigned i = 0; i < block->num_dom_children; i++) {
      nir_block *child = block->dom_children[i];
      progress |= vectorize_block(nir, child, instr_set);
   }

   nir_foreach_instr_reverse(instr, block)
      vec_instr_set_remove(nir, instr_set, instr);

   return progress;
}

static bool
nir_opt_vectorize_impl(struct nir_shader *nir, nir_function_impl *impl)
{
   struct set *instr_set = vec_instr_set_create();

   nir_metadata_require(impl, nir_metadata_dominance);

   bool progress = vectorize_block(nir, nir_start_block(impl), instr_set);

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   vec_instr_set_destroy(instr_set);
   return progress;
}

bool
nir_opt_vectorize(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_vectorize_impl(shader, function->impl);
   }

   return progress;
}