nir: Factor out most of the algebraic passes C code to .c/.h.
[mesa.git] / src / compiler / nir / nir_search.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include <inttypes.h>
29 #include "nir_search.h"
30 #include "nir_builder.h"
31 #include "util/half_float.h"
32
33 /* This should be the same as nir_search_max_comm_ops in nir_algebraic.py. */
34 #define NIR_SEARCH_MAX_COMM_OPS 8
35
struct match_state {
   /* Set once any inexact (~) sub-expression has been matched. */
   bool inexact_match;
   /* Set once any matched ALU instruction carried the exact flag. */
   bool has_exact_alu;
   /* Bitfield: bit i flips the first two sources of the pattern's i'th
    * commutative expression (indexed by comm_expr_idx) during matching.
    */
   uint8_t comm_op_direction;
   /* Bitfield of which pattern variables have been bound so far. */
   unsigned variables_seen;
   /* The ALU source each pattern variable was bound to. */
   nir_alu_src variables[NIR_SEARCH_MAX_VARIABLES];
   /* Scratch table handed to variable cond callbacks; cleared by the caller
    * after each successful replacement.
    */
   struct hash_table *range_ht;
};
44
/* Forward declaration: match_expression and match_value recurse into each
 * other while walking the search-expression tree.
 */
static bool
match_expression(const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state);

/* The identity swizzle (component i reads component i); trailing entries
 * beyond the initializer are zero.
 */
static const uint8_t identity_swizzle[NIR_MAX_VEC_COMPONENTS] = { 0, 1, 2, 3 };
51
52 /**
53 * Check if a source produces a value of the given type.
54 *
55 * Used for satisfying 'a@type' constraints.
56 */
57 static bool
58 src_is_type(nir_src src, nir_alu_type type)
59 {
60 assert(type != nir_type_invalid);
61
62 if (!src.is_ssa)
63 return false;
64
65 if (src.ssa->parent_instr->type == nir_instr_type_alu) {
66 nir_alu_instr *src_alu = nir_instr_as_alu(src.ssa->parent_instr);
67 nir_alu_type output_type = nir_op_infos[src_alu->op].output_type;
68
69 if (type == nir_type_bool) {
70 switch (src_alu->op) {
71 case nir_op_iand:
72 case nir_op_ior:
73 case nir_op_ixor:
74 return src_is_type(src_alu->src[0].src, nir_type_bool) &&
75 src_is_type(src_alu->src[1].src, nir_type_bool);
76 case nir_op_inot:
77 return src_is_type(src_alu->src[0].src, nir_type_bool);
78 default:
79 break;
80 }
81 }
82
83 return nir_alu_type_get_base_type(output_type) == type;
84 } else if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
85 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
86
87 if (type == nir_type_bool) {
88 return intr->intrinsic == nir_intrinsic_load_front_face ||
89 intr->intrinsic == nir_intrinsic_load_helper_invocation;
90 }
91 }
92
93 /* don't know */
94 return false;
95 }
96
/**
 * Return true if NIR opcode @nop is one of the opcodes that search opcode
 * @sop stands for.
 *
 * Search opcodes extend nir_op with bit-size-agnostic conversion opcodes
 * (e.g. nir_search_op_i2f matches i2f16/i2f32/i2f64).  Values at or below
 * nir_last_opcode are ordinary NIR opcodes and compare directly.
 */
static bool
nir_op_matches_search_op(nir_op nop, uint16_t sop)
{
   if (sop <= nir_last_opcode)
      return nop == sop;

/* Float conversions exist at 16/32/64 bits. */
#define MATCH_FCONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

/* Integer conversions also exist at 8 bits. */
#define MATCH_ICONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##8 || \
             nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

/* Boolean conversions exist only at 1 and 32 bits. */
#define MATCH_BCONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##1 || \
             nop == nir_op_##op##32;

   switch (sop) {
   MATCH_FCONV_CASE(i2f)
   MATCH_FCONV_CASE(u2f)
   MATCH_FCONV_CASE(f2f)
   MATCH_ICONV_CASE(f2u)
   MATCH_ICONV_CASE(f2i)
   MATCH_ICONV_CASE(u2u)
   MATCH_ICONV_CASE(i2i)
   MATCH_FCONV_CASE(b2f)
   MATCH_ICONV_CASE(b2i)
   MATCH_BCONV_CASE(i2b)
   MATCH_BCONV_CASE(f2b)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
#undef MATCH_BCONV_CASE
}
141
/**
 * Collapse a NIR opcode to its bit-size-agnostic search opcode.
 *
 * Sized conversion opcodes (e.g. i2f16/i2f32/i2f64) map to a single
 * nir_search_op_* value; every other opcode is returned unchanged.  This is
 * the inverse of nir_op_for_search_op() modulo the destination bit size.
 */
uint16_t
nir_search_op_for_nir_op(nir_op nop)
{
#define MATCH_FCONV_CASE(op) \
   case nir_op_##op##16: \
   case nir_op_##op##32: \
   case nir_op_##op##64: \
      return nir_search_op_##op;

#define MATCH_ICONV_CASE(op) \
   case nir_op_##op##8: \
   case nir_op_##op##16: \
   case nir_op_##op##32: \
   case nir_op_##op##64: \
      return nir_search_op_##op;

#define MATCH_BCONV_CASE(op) \
   case nir_op_##op##1: \
   case nir_op_##op##32: \
      return nir_search_op_##op;


   switch (nop) {
   MATCH_FCONV_CASE(i2f)
   MATCH_FCONV_CASE(u2f)
   MATCH_FCONV_CASE(f2f)
   MATCH_ICONV_CASE(f2u)
   MATCH_ICONV_CASE(f2i)
   MATCH_ICONV_CASE(u2u)
   MATCH_ICONV_CASE(i2i)
   MATCH_FCONV_CASE(b2f)
   MATCH_ICONV_CASE(b2i)
   MATCH_BCONV_CASE(i2b)
   MATCH_BCONV_CASE(f2b)
   default:
      /* Not a conversion: the NIR opcode is its own search opcode. */
      return nop;
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
#undef MATCH_BCONV_CASE
}
184
/**
 * Expand a search opcode back into a concrete NIR opcode for @bit_size.
 *
 * Ordinary NIR opcodes (sop <= nir_last_opcode) pass through unchanged;
 * size-agnostic conversion opcodes select the variant matching @bit_size.
 * Asserts (via unreachable) if the bit size has no variant.
 */
static nir_op
nir_op_for_search_op(uint16_t sop, unsigned bit_size)
{
   if (sop <= nir_last_opcode)
      return sop;

#define RET_FCONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 16: return nir_op_##op##16; \
      case 32: return nir_op_##op##32; \
      case 64: return nir_op_##op##64; \
      default: unreachable("Invalid bit size"); \
      }

#define RET_ICONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 8: return nir_op_##op##8; \
      case 16: return nir_op_##op##16; \
      case 32: return nir_op_##op##32; \
      case 64: return nir_op_##op##64; \
      default: unreachable("Invalid bit size"); \
      }

#define RET_BCONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 1: return nir_op_##op##1; \
      case 32: return nir_op_##op##32; \
      default: unreachable("Invalid bit size"); \
      }

   switch (sop) {
   RET_FCONV_CASE(i2f)
   RET_FCONV_CASE(u2f)
   RET_FCONV_CASE(f2f)
   RET_ICONV_CASE(f2u)
   RET_ICONV_CASE(f2i)
   RET_ICONV_CASE(u2u)
   RET_ICONV_CASE(i2i)
   RET_FCONV_CASE(b2f)
   RET_ICONV_CASE(b2i)
   RET_BCONV_CASE(i2b)
   RET_BCONV_CASE(f2b)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef RET_FCONV_CASE
#undef RET_ICONV_CASE
#undef RET_BCONV_CASE
}
238
/**
 * Try to match one search value against source @src of ALU instruction
 * @instr.
 *
 * @num_components and @swizzle describe which components of the source the
 * enclosing expression actually reads.  On success, any pattern variables
 * encountered are bound (or re-checked) in @state.  Returns false without
 * fully unwinding @state on mismatch; the caller restarts matching with a
 * fresh state.
 */
static bool
match_value(const nir_search_value *value, nir_alu_instr *instr, unsigned src,
            unsigned num_components, const uint8_t *swizzle,
            struct match_state *state)
{
   uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];

   /* Searching only works on SSA values because, if it's not SSA, we can't
    * know if the value changed between one instance of that value in the
    * expression and another. Also, the replace operation will place reads of
    * that value right before the last instruction in the expression we're
    * replacing so those reads will happen after the original reads and may
    * not be valid if they're register reads.
    */
   assert(instr->src[src].src.is_ssa);

   /* If the source is an explicitly sized source, then we need to reset
    * both the number of components and the swizzle.
    */
   if (nir_op_infos[instr->op].input_sizes[src] != 0) {
      num_components = nir_op_infos[instr->op].input_sizes[src];
      swizzle = identity_swizzle;
   }

   /* Compose the caller's swizzle with this source's own swizzle. */
   for (unsigned i = 0; i < num_components; ++i)
      new_swizzle[i] = instr->src[src].swizzle[swizzle[i]];

   /* If the value has a specific bit size and it doesn't match, bail */
   if (value->bit_size > 0 &&
       nir_src_bit_size(instr->src[src].src) != value->bit_size)
      return false;

   switch (value->type) {
   case nir_search_value_expression:
      /* Sub-expressions can only match ALU instructions. */
      if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
         return false;

      return match_expression(nir_search_value_as_expression(value),
                              nir_instr_as_alu(instr->src[src].src.ssa->parent_instr),
                              num_components, new_swizzle, state);

   case nir_search_value_variable: {
      nir_search_variable *var = nir_search_value_as_variable(value);
      assert(var->variable < NIR_SEARCH_MAX_VARIABLES);

      if (state->variables_seen & (1 << var->variable)) {
         /* Already bound: this occurrence must read the same SSA value
          * through the same composed swizzle.
          */
         if (state->variables[var->variable].src.ssa != instr->src[src].src.ssa)
            return false;

         assert(!instr->src[src].abs && !instr->src[src].negate);

         for (unsigned i = 0; i < num_components; ++i) {
            if (state->variables[var->variable].swizzle[i] != new_swizzle[i])
               return false;
         }

         return true;
      } else {
         /* First occurrence: verify the variable's constraints, then bind
          * it in the match state.
          */
         if (var->is_constant &&
             instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
            return false;

         if (var->cond && !var->cond(state->range_ht, instr,
                                     src, num_components, new_swizzle))
            return false;

         if (var->type != nir_type_invalid &&
             !src_is_type(instr->src[src].src, var->type))
            return false;

         state->variables_seen |= (1 << var->variable);
         state->variables[var->variable].src = instr->src[src].src;
         state->variables[var->variable].abs = false;
         state->variables[var->variable].negate = false;

         /* Record the composed swizzle; pad unused components with 0. */
         for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) {
            if (i < num_components)
               state->variables[var->variable].swizzle[i] = new_swizzle[i];
            else
               state->variables[var->variable].swizzle[i] = 0;
         }

         return true;
      }
   }

   case nir_search_value_constant: {
      nir_search_constant *const_val = nir_search_value_as_constant(value);

      if (!nir_src_is_const(instr->src[src].src))
         return false;

      switch (const_val->type) {
      case nir_type_float: {
         nir_load_const_instr *const load =
            nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);

         /* There are 8-bit and 1-bit integer types, but there are no 8-bit or
          * 1-bit float types. This prevents potential assertion failures in
          * nir_src_comp_as_float.
          */
         if (load->def.bit_size < 16)
            return false;

         /* Every matched component must equal the pattern constant. */
         for (unsigned i = 0; i < num_components; ++i) {
            double val = nir_src_comp_as_float(instr->src[src].src,
                                               new_swizzle[i]);
            if (val != const_val->data.d)
               return false;
         }
         return true;
      }

      case nir_type_int:
      case nir_type_uint:
      case nir_type_bool: {
         /* Compare only the low bit_size bits; the pattern constant's upper
          * bits are don't-cares at narrower bit sizes.
          */
         unsigned bit_size = nir_src_bit_size(instr->src[src].src);
         uint64_t mask = bit_size == 64 ? UINT64_MAX : (1ull << bit_size) - 1;
         for (unsigned i = 0; i < num_components; ++i) {
            uint64_t val = nir_src_comp_as_uint(instr->src[src].src,
                                                new_swizzle[i]);
            if ((val & mask) != (const_val->data.u & mask))
               return false;
         }
         return true;
      }

      default:
         unreachable("Invalid alu source type");
      }
   }

   default:
      unreachable("Invalid search value type");
   }
}
375
/**
 * Try to match a search expression against ALU instruction @instr.
 *
 * Checks the expression's condition, opcode, bit size, and exact/inexact
 * compatibility, then recursively matches each source via match_value().
 * For commutative expressions the first two sources may be swapped, as
 * directed by the bit in state->comm_op_direction selected by the
 * expression's comm_expr_idx.
 */
static bool
match_expression(const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state)
{
   if (expr->cond && !expr->cond(instr))
      return false;

   if (!nir_op_matches_search_op(instr->op, expr->opcode))
      return false;

   assert(instr->dest.dest.is_ssa);

   if (expr->value.bit_size > 0 &&
       instr->dest.dest.ssa.bit_size != expr->value.bit_size)
      return false;

   /* An inexact (~) pattern must never rewrite an exact-flagged
    * instruction; both flags are sticky across the whole match.
    */
   state->inexact_match = expr->inexact || state->inexact_match;
   state->has_exact_alu = instr->exact || state->has_exact_alu;
   if (state->inexact_match && state->has_exact_alu)
      return false;

   assert(!instr->dest.saturate);
   assert(nir_op_infos[instr->op].num_inputs > 0);

   /* If we have an explicitly sized destination, we can only handle the
    * identity swizzle. While dot(vec3(a, b, c).zxy) is a valid
    * expression, we don't have the information right now to propagate that
    * swizzle through. We can only properly propagate swizzles if the
    * instruction is vectorized.
    */
   if (nir_op_infos[instr->op].output_size != 0) {
      for (unsigned i = 0; i < num_components; i++) {
         if (swizzle[i] != i)
            return false;
      }
   }

   /* If this is a commutative expression and it's one of the first few, look
    * up its direction for the current search operation. We'll use that value
    * to possibly flip the sources for the match.
    */
   unsigned comm_op_flip =
      (expr->comm_expr_idx >= 0 &&
       expr->comm_expr_idx < NIR_SEARCH_MAX_COMM_OPS) ?
      ((state->comm_op_direction >> expr->comm_expr_idx) & 1) : 0;

   bool matched = true;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* 2src_commutative instructions that have 3 sources are only commutative
       * in the first two sources. Source 2 is always source 2.
       */
      if (!match_value(expr->srcs[i], instr,
                       i < 2 ? i ^ comm_op_flip : i,
                       num_components, swizzle, state)) {
         matched = false;
         break;
      }
   }

   return matched;
}
438
439 static unsigned
440 replace_bitsize(const nir_search_value *value, unsigned search_bitsize,
441 struct match_state *state)
442 {
443 if (value->bit_size > 0)
444 return value->bit_size;
445 if (value->bit_size < 0)
446 return nir_src_bit_size(state->variables[-value->bit_size - 1].src);
447 return search_bitsize;
448 }
449
/**
 * Build NIR at the builder's cursor for replacement value @value.
 *
 * Recursively emits ALU instructions for expressions, re-uses the sources
 * bound in @state for variables, and emits load_const for constants.
 * Returns the result as an nir_alu_src ready to be plugged into a parent
 * ALU instruction.
 */
static nir_alu_src
construct_value(nir_builder *build,
                const nir_search_value *value,
                unsigned num_components, unsigned search_bitsize,
                struct match_state *state,
                nir_instr *instr)
{
   switch (value->type) {
   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(value);
      unsigned dst_bit_size = replace_bitsize(value, search_bitsize, state);
      nir_op op = nir_op_for_search_op(expr->opcode, dst_bit_size);

      /* Explicitly sized outputs override the requested component count. */
      if (nir_op_infos[op].output_size != 0)
         num_components = nir_op_infos[op].output_size;

      nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
      nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                        dst_bit_size, NULL);
      alu->dest.write_mask = (1 << num_components) - 1;
      alu->dest.saturate = false;

      /* We have no way of knowing what values in a given search expression
       * map to a particular replacement value.  Therefore, if the
       * expression we are replacing has any exact values, the entire
       * replacement should be exact.
       */
      alu->exact = state->has_exact_alu;

      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         /* If the source is an explicitly sized source, then we need to reset
          * the number of components to match.
          */
         if (nir_op_infos[alu->op].input_sizes[i] != 0)
            num_components = nir_op_infos[alu->op].input_sizes[i];

         alu->src[i] = construct_value(build, expr->srcs[i],
                                       num_components, search_bitsize,
                                       state, instr);
      }

      nir_builder_instr_insert(build, &alu->instr);

      nir_alu_src val;
      val.src = nir_src_for_ssa(&alu->dest.dest.ssa);
      val.negate = false;
      /* NOTE(review): trailing comma (comma operator) — behaves exactly
       * like a semicolon here, but a semicolon was likely intended.
       */
      val.abs = false,
      memcpy(val.swizzle, identity_swizzle, sizeof val.swizzle);

      return val;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(value);
      assert(state->variables_seen & (1 << var->variable));

      nir_alu_src val = { NIR_SRC_INIT };
      nir_alu_src_copy(&val, &state->variables[var->variable],
                       (void *)build->shader);
      assert(!var->is_constant);

      /* Compose the pattern variable's swizzle with the bound source's. */
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         val.swizzle[i] = state->variables[var->variable].swizzle[var->swizzle[i]];

      return val;
   }

   case nir_search_value_constant: {
      const nir_search_constant *c = nir_search_value_as_constant(value);
      unsigned bit_size = replace_bitsize(value, search_bitsize, state);

      nir_ssa_def *cval;
      switch (c->type) {
      case nir_type_float:
         cval = nir_imm_floatN_t(build, c->data.d, bit_size);
         break;

      case nir_type_int:
      case nir_type_uint:
         cval = nir_imm_intN_t(build, c->data.i, bit_size);
         break;

      case nir_type_bool:
         cval = nir_imm_boolN_t(build, c->data.u, bit_size);
         break;

      default:
         unreachable("Invalid alu source type");
      }

      nir_alu_src val;
      val.src = nir_src_for_ssa(cval);
      val.negate = false;
      /* NOTE(review): comma operator again — same effect as a semicolon. */
      val.abs = false,
      memset(val.swizzle, 0, sizeof val.swizzle);

      return val;
   }

   default:
      unreachable("Invalid search value type");
   }
}
553
554 UNUSED static void dump_value(const nir_search_value *val)
555 {
556 switch (val->type) {
557 case nir_search_value_constant: {
558 const nir_search_constant *sconst = nir_search_value_as_constant(val);
559 switch (sconst->type) {
560 case nir_type_float:
561 fprintf(stderr, "%f", sconst->data.d);
562 break;
563 case nir_type_int:
564 fprintf(stderr, "%"PRId64, sconst->data.i);
565 break;
566 case nir_type_uint:
567 fprintf(stderr, "0x%"PRIx64, sconst->data.u);
568 break;
569 case nir_type_bool:
570 fprintf(stderr, "%s", sconst->data.u != 0 ? "True" : "False");
571 break;
572 default:
573 unreachable("bad const type");
574 }
575 break;
576 }
577
578 case nir_search_value_variable: {
579 const nir_search_variable *var = nir_search_value_as_variable(val);
580 if (var->is_constant)
581 fprintf(stderr, "#");
582 fprintf(stderr, "%c", var->variable + 'a');
583 break;
584 }
585
586 case nir_search_value_expression: {
587 const nir_search_expression *expr = nir_search_value_as_expression(val);
588 fprintf(stderr, "(");
589 if (expr->inexact)
590 fprintf(stderr, "~");
591 switch (expr->opcode) {
592 #define CASE(n) \
593 case nir_search_op_##n: fprintf(stderr, #n); break;
594 CASE(f2b)
595 CASE(b2f)
596 CASE(b2i)
597 CASE(i2b)
598 CASE(i2i)
599 CASE(f2i)
600 CASE(i2f)
601 #undef CASE
602 default:
603 fprintf(stderr, "%s", nir_op_infos[expr->opcode].name);
604 }
605
606 unsigned num_srcs = 1;
607 if (expr->opcode <= nir_last_opcode)
608 num_srcs = nir_op_infos[expr->opcode].num_inputs;
609
610 for (unsigned i = 0; i < num_srcs; i++) {
611 fprintf(stderr, " ");
612 dump_value(expr->srcs[i]);
613 }
614
615 fprintf(stderr, ")");
616 break;
617 }
618 }
619
620 if (val->bit_size > 0)
621 fprintf(stderr, "@%d", val->bit_size);
622 }
623
/**
 * Attempt to match @search against @instr and, on success, replace the
 * instruction with NIR built from @replace.
 *
 * Tries every combination of commutative-source orderings (up to
 * NIR_SEARCH_MAX_COMM_OPS expressions' worth).  Returns the new SSA def on
 * success, or NULL if the pattern did not match.  The matched instruction
 * is removed; the rest of the old expression tree is left for DCE.
 */
nir_ssa_def *
nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
                  struct hash_table *range_ht,
                  const nir_search_expression *search,
                  const nir_search_value *replace)
{
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   /* Start from the identity swizzle over the destination's components. */
   for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
      swizzle[i] = i;

   assert(instr->dest.dest.is_ssa);

   struct match_state state;
   state.inexact_match = false;
   state.has_exact_alu = false;
   state.range_ht = range_ht;

   STATIC_ASSERT(sizeof(state.comm_op_direction) * 8 >= NIR_SEARCH_MAX_COMM_OPS);

   unsigned comm_expr_combinations =
      1 << MIN2(search->comm_exprs, NIR_SEARCH_MAX_COMM_OPS);

   bool found = false;
   for (unsigned comb = 0; comb < comm_expr_combinations; comb++) {
      /* The bitfield of directions is just the current iteration.  Hooray for
       * binary.
       */
      state.comm_op_direction = comb;
      state.variables_seen = 0;

      if (match_expression(search, instr,
                           instr->dest.dest.ssa.num_components,
                           swizzle, &state)) {
         found = true;
         break;
      }
   }
   if (!found)
      return NULL;

#if 0
   fprintf(stderr, "matched: ");
   dump_value(&search->value);
   fprintf(stderr, " -> ");
   dump_value(replace);
   fprintf(stderr, " ssa_%d\n", instr->dest.dest.ssa.index);
#endif

   /* Build the replacement immediately before the matched instruction. */
   build->cursor = nir_before_instr(&instr->instr);

   nir_alu_src val = construct_value(build, replace,
                                     instr->dest.dest.ssa.num_components,
                                     instr->dest.dest.ssa.bit_size,
                                     &state, &instr->instr);

   /* Note that NIR builder will elide the MOV if it's a no-op, which may
    * allow more work to be done in a single pass through algebraic.
    */
   nir_ssa_def *ssa_val =
      nir_mov_alu(build, val, instr->dest.dest.ssa.num_components);
   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(ssa_val));

   /* We know this one has no more uses because we just rewrote them all,
    * so we can remove it.  The rest of the matched expression, however, we
    * don't know so much about.  We'll just let dead code clean them up.
    */
   nir_instr_remove(&instr->instr);

   return ssa_val;
}
695
696 static void
697 nir_algebraic_automaton(nir_block *block, uint16_t *states,
698 const struct per_op_table *pass_op_table)
699 {
700 nir_foreach_instr(instr, block) {
701 switch (instr->type) {
702 case nir_instr_type_alu: {
703 nir_alu_instr *alu = nir_instr_as_alu(instr);
704 nir_op op = alu->op;
705 uint16_t search_op = nir_search_op_for_nir_op(op);
706 const struct per_op_table *tbl = &pass_op_table[search_op];
707 if (tbl->num_filtered_states == 0)
708 continue;
709
710 /* Calculate the index into the transition table. Note the index
711 * calculated must match the iteration order of Python's
712 * itertools.product(), which was used to emit the transition
713 * table.
714 */
715 uint16_t index = 0;
716 for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
717 index *= tbl->num_filtered_states;
718 index += tbl->filter[states[alu->src[i].src.ssa->index]];
719 }
720 states[alu->dest.dest.ssa.index] = tbl->table[index];
721 break;
722 }
723
724 case nir_instr_type_load_const: {
725 nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
726 states[load_const->def.index] = CONST_STATE;
727 break;
728 }
729
730 default:
731 break;
732 }
733 }
734 }
735
/**
 * Try every transform applicable to each ALU instruction in @block.
 *
 * Instructions are visited in reverse order.  For each SSA-destination ALU
 * instruction, the automaton state of its def (from @states) selects a
 * bucket of candidate transforms; the first enabled one whose pattern
 * matches rewrites the instruction.  Returns true if anything changed.
 */
static bool
nir_algebraic_block(nir_builder *build, nir_block *block,
                    struct hash_table *range_ht,
                    const bool *condition_flags,
                    const struct transform **transforms,
                    const uint16_t *transform_counts,
                    const uint16_t *states)
{
   bool progress = false;
   const unsigned execution_mode = build->shader->info.float_controls_execution_mode;

   nir_foreach_instr_reverse_safe(instr, block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);
      if (!alu->dest.dest.is_ssa)
         continue;

      unsigned bit_size = alu->dest.dest.ssa.bit_size;
      /* Inexact (~) patterns are skipped entirely when the shader requires
       * strict float semantics at this bit size.
       */
      const bool ignore_inexact =
         nir_is_float_control_signed_zero_inf_nan_preserve(execution_mode, bit_size) ||
         nir_is_denorm_flush_to_zero(execution_mode, bit_size);

      int xform_idx = states[alu->dest.dest.ssa.index];
      for (uint16_t i = 0; i < transform_counts[xform_idx]; i++) {
         const struct transform *xform = &transforms[xform_idx][i];
         if (condition_flags[xform->condition_offset] &&
             !(xform->search->inexact && ignore_inexact) &&
             nir_replace_instr(build, alu, range_ht,
                               xform->search, xform->replace)) {
            /* The rewrite changed the IR, so cached range-analysis results
             * are presumably stale — clear them.
             */
            _mesa_hash_table_clear(range_ht, NULL);
            progress = true;
            break;
         }
      }
   }

   return progress;
}
776
777 bool
778 nir_algebraic_impl(nir_function_impl *impl,
779 const bool *condition_flags,
780 const struct transform **transforms,
781 const uint16_t *transform_counts,
782 const struct per_op_table *pass_op_table)
783 {
784 bool progress = false;
785
786 nir_builder build;
787 nir_builder_init(&build, impl);
788
789 /* Note: it's important here that we're allocating a zeroed array, since
790 * state 0 is the default state, which means we don't have to visit
791 * anything other than constants and ALU instructions.
792 */
793 uint16_t *states = calloc(impl->ssa_alloc, sizeof(*states));
794
795 struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
796
797 nir_foreach_block(block, impl) {
798 nir_algebraic_automaton(block, states, pass_op_table);
799 }
800
801 nir_foreach_block_reverse(block, impl) {
802 progress |= nir_algebraic_block(&build, block, range_ht, condition_flags,
803 transforms, transform_counts,
804 states);
805 }
806
807 ralloc_free(range_ht);
808 free(states);
809
810 if (progress) {
811 nir_metadata_preserve(impl, nir_metadata_block_index |
812 nir_metadata_dominance);
813 } else {
814 #ifndef NDEBUG
815 impl->valid_metadata &= ~nir_metadata_not_properly_reset;
816 #endif
817 }
818
819 return progress;
820 }