/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
29 #include "nir_search.h"
30 #include "nir_builder.h"
31 #include "util/half_float.h"
33 /* This should be the same as nir_search_max_comm_ops in nir_algebraic.py. */
34 #define NIR_SEARCH_MAX_COMM_OPS 8
39 uint8_t comm_op_direction
;
40 unsigned variables_seen
;
42 /* Used for running the automaton on newly-constructed instructions. */
43 struct util_dynarray
*states
;
44 const struct per_op_table
*pass_op_table
;
46 nir_alu_src variables
[NIR_SEARCH_MAX_VARIABLES
];
47 struct hash_table
*range_ht
;
51 match_expression(const nir_search_expression
*expr
, nir_alu_instr
*instr
,
52 unsigned num_components
, const uint8_t *swizzle
,
53 struct match_state
*state
);
55 nir_algebraic_automaton(nir_instr
*instr
, struct util_dynarray
*states
,
56 const struct per_op_table
*pass_op_table
);
58 static const uint8_t identity_swizzle
[NIR_MAX_VEC_COMPONENTS
] = { 0, 1, 2, 3 };
61 * Check if a source produces a value of the given type.
63 * Used for satisfying 'a@type' constraints.
66 src_is_type(nir_src src
, nir_alu_type type
)
68 assert(type
!= nir_type_invalid
);
73 if (src
.ssa
->parent_instr
->type
== nir_instr_type_alu
) {
74 nir_alu_instr
*src_alu
= nir_instr_as_alu(src
.ssa
->parent_instr
);
75 nir_alu_type output_type
= nir_op_infos
[src_alu
->op
].output_type
;
77 if (type
== nir_type_bool
) {
78 switch (src_alu
->op
) {
82 return src_is_type(src_alu
->src
[0].src
, nir_type_bool
) &&
83 src_is_type(src_alu
->src
[1].src
, nir_type_bool
);
85 return src_is_type(src_alu
->src
[0].src
, nir_type_bool
);
91 return nir_alu_type_get_base_type(output_type
) == type
;
92 } else if (src
.ssa
->parent_instr
->type
== nir_instr_type_intrinsic
) {
93 nir_intrinsic_instr
*intr
= nir_instr_as_intrinsic(src
.ssa
->parent_instr
);
95 if (type
== nir_type_bool
) {
96 return intr
->intrinsic
== nir_intrinsic_load_front_face
||
97 intr
->intrinsic
== nir_intrinsic_load_helper_invocation
;
106 nir_op_matches_search_op(nir_op nop
, uint16_t sop
)
108 if (sop
<= nir_last_opcode
)
111 #define MATCH_FCONV_CASE(op) \
112 case nir_search_op_##op: \
113 return nop == nir_op_##op##16 || \
114 nop == nir_op_##op##32 || \
115 nop == nir_op_##op##64;
117 #define MATCH_ICONV_CASE(op) \
118 case nir_search_op_##op: \
119 return nop == nir_op_##op##8 || \
120 nop == nir_op_##op##16 || \
121 nop == nir_op_##op##32 || \
122 nop == nir_op_##op##64;
124 #define MATCH_BCONV_CASE(op) \
125 case nir_search_op_##op: \
126 return nop == nir_op_##op##1 || \
127 nop == nir_op_##op##32;
130 MATCH_FCONV_CASE(i2f
)
131 MATCH_FCONV_CASE(u2f
)
132 MATCH_FCONV_CASE(f2f
)
133 MATCH_ICONV_CASE(f2u
)
134 MATCH_ICONV_CASE(f2i
)
135 MATCH_ICONV_CASE(u2u
)
136 MATCH_ICONV_CASE(i2i
)
137 MATCH_FCONV_CASE(b2f
)
138 MATCH_ICONV_CASE(b2i
)
139 MATCH_BCONV_CASE(i2b
)
140 MATCH_BCONV_CASE(f2b
)
142 unreachable("Invalid nir_search_op");
145 #undef MATCH_FCONV_CASE
146 #undef MATCH_ICONV_CASE
147 #undef MATCH_BCONV_CASE
151 nir_search_op_for_nir_op(nir_op nop
)
153 #define MATCH_FCONV_CASE(op) \
154 case nir_op_##op##16: \
155 case nir_op_##op##32: \
156 case nir_op_##op##64: \
157 return nir_search_op_##op;
159 #define MATCH_ICONV_CASE(op) \
160 case nir_op_##op##8: \
161 case nir_op_##op##16: \
162 case nir_op_##op##32: \
163 case nir_op_##op##64: \
164 return nir_search_op_##op;
166 #define MATCH_BCONV_CASE(op) \
167 case nir_op_##op##1: \
168 case nir_op_##op##32: \
169 return nir_search_op_##op;
173 MATCH_FCONV_CASE(i2f
)
174 MATCH_FCONV_CASE(u2f
)
175 MATCH_FCONV_CASE(f2f
)
176 MATCH_ICONV_CASE(f2u
)
177 MATCH_ICONV_CASE(f2i
)
178 MATCH_ICONV_CASE(u2u
)
179 MATCH_ICONV_CASE(i2i
)
180 MATCH_FCONV_CASE(b2f
)
181 MATCH_ICONV_CASE(b2i
)
182 MATCH_BCONV_CASE(i2b
)
183 MATCH_BCONV_CASE(f2b
)
188 #undef MATCH_FCONV_CASE
189 #undef MATCH_ICONV_CASE
190 #undef MATCH_BCONV_CASE
194 nir_op_for_search_op(uint16_t sop
, unsigned bit_size
)
196 if (sop
<= nir_last_opcode
)
199 #define RET_FCONV_CASE(op) \
200 case nir_search_op_##op: \
201 switch (bit_size) { \
202 case 16: return nir_op_##op##16; \
203 case 32: return nir_op_##op##32; \
204 case 64: return nir_op_##op##64; \
205 default: unreachable("Invalid bit size"); \
208 #define RET_ICONV_CASE(op) \
209 case nir_search_op_##op: \
210 switch (bit_size) { \
211 case 8: return nir_op_##op##8; \
212 case 16: return nir_op_##op##16; \
213 case 32: return nir_op_##op##32; \
214 case 64: return nir_op_##op##64; \
215 default: unreachable("Invalid bit size"); \
218 #define RET_BCONV_CASE(op) \
219 case nir_search_op_##op: \
220 switch (bit_size) { \
221 case 1: return nir_op_##op##1; \
222 case 32: return nir_op_##op##32; \
223 default: unreachable("Invalid bit size"); \
239 unreachable("Invalid nir_search_op");
242 #undef RET_FCONV_CASE
243 #undef RET_ICONV_CASE
244 #undef RET_BCONV_CASE
248 match_value(const nir_search_value
*value
, nir_alu_instr
*instr
, unsigned src
,
249 unsigned num_components
, const uint8_t *swizzle
,
250 struct match_state
*state
)
252 uint8_t new_swizzle
[NIR_MAX_VEC_COMPONENTS
];
254 /* Searching only works on SSA values because, if it's not SSA, we can't
255 * know if the value changed between one instance of that value in the
256 * expression and another. Also, the replace operation will place reads of
257 * that value right before the last instruction in the expression we're
258 * replacing so those reads will happen after the original reads and may
259 * not be valid if they're register reads.
261 assert(instr
->src
[src
].src
.is_ssa
);
263 /* If the source is an explicitly sized source, then we need to reset
264 * both the number of components and the swizzle.
266 if (nir_op_infos
[instr
->op
].input_sizes
[src
] != 0) {
267 num_components
= nir_op_infos
[instr
->op
].input_sizes
[src
];
268 swizzle
= identity_swizzle
;
271 for (unsigned i
= 0; i
< num_components
; ++i
)
272 new_swizzle
[i
] = instr
->src
[src
].swizzle
[swizzle
[i
]];
274 /* If the value has a specific bit size and it doesn't match, bail */
275 if (value
->bit_size
> 0 &&
276 nir_src_bit_size(instr
->src
[src
].src
) != value
->bit_size
)
279 switch (value
->type
) {
280 case nir_search_value_expression
:
281 if (instr
->src
[src
].src
.ssa
->parent_instr
->type
!= nir_instr_type_alu
)
284 return match_expression(nir_search_value_as_expression(value
),
285 nir_instr_as_alu(instr
->src
[src
].src
.ssa
->parent_instr
),
286 num_components
, new_swizzle
, state
);
288 case nir_search_value_variable
: {
289 nir_search_variable
*var
= nir_search_value_as_variable(value
);
290 assert(var
->variable
< NIR_SEARCH_MAX_VARIABLES
);
292 if (state
->variables_seen
& (1 << var
->variable
)) {
293 if (state
->variables
[var
->variable
].src
.ssa
!= instr
->src
[src
].src
.ssa
)
296 assert(!instr
->src
[src
].abs
&& !instr
->src
[src
].negate
);
298 for (unsigned i
= 0; i
< num_components
; ++i
) {
299 if (state
->variables
[var
->variable
].swizzle
[i
] != new_swizzle
[i
])
305 if (var
->is_constant
&&
306 instr
->src
[src
].src
.ssa
->parent_instr
->type
!= nir_instr_type_load_const
)
309 if (var
->cond
&& !var
->cond(state
->range_ht
, instr
,
310 src
, num_components
, new_swizzle
))
313 if (var
->type
!= nir_type_invalid
&&
314 !src_is_type(instr
->src
[src
].src
, var
->type
))
317 state
->variables_seen
|= (1 << var
->variable
);
318 state
->variables
[var
->variable
].src
= instr
->src
[src
].src
;
319 state
->variables
[var
->variable
].abs
= false;
320 state
->variables
[var
->variable
].negate
= false;
322 for (unsigned i
= 0; i
< NIR_MAX_VEC_COMPONENTS
; ++i
) {
323 if (i
< num_components
)
324 state
->variables
[var
->variable
].swizzle
[i
] = new_swizzle
[i
];
326 state
->variables
[var
->variable
].swizzle
[i
] = 0;
333 case nir_search_value_constant
: {
334 nir_search_constant
*const_val
= nir_search_value_as_constant(value
);
336 if (!nir_src_is_const(instr
->src
[src
].src
))
339 switch (const_val
->type
) {
340 case nir_type_float
: {
341 nir_load_const_instr
*const load
=
342 nir_instr_as_load_const(instr
->src
[src
].src
.ssa
->parent_instr
);
344 /* There are 8-bit and 1-bit integer types, but there are no 8-bit or
345 * 1-bit float types. This prevents potential assertion failures in
346 * nir_src_comp_as_float.
348 if (load
->def
.bit_size
< 16)
351 for (unsigned i
= 0; i
< num_components
; ++i
) {
352 double val
= nir_src_comp_as_float(instr
->src
[src
].src
,
354 if (val
!= const_val
->data
.d
)
362 case nir_type_bool
: {
363 unsigned bit_size
= nir_src_bit_size(instr
->src
[src
].src
);
364 uint64_t mask
= bit_size
== 64 ? UINT64_MAX
: (1ull << bit_size
) - 1;
365 for (unsigned i
= 0; i
< num_components
; ++i
) {
366 uint64_t val
= nir_src_comp_as_uint(instr
->src
[src
].src
,
368 if ((val
& mask
) != (const_val
->data
.u
& mask
))
375 unreachable("Invalid alu source type");
380 unreachable("Invalid search value type");
385 match_expression(const nir_search_expression
*expr
, nir_alu_instr
*instr
,
386 unsigned num_components
, const uint8_t *swizzle
,
387 struct match_state
*state
)
389 if (expr
->cond
&& !expr
->cond(instr
))
392 if (!nir_op_matches_search_op(instr
->op
, expr
->opcode
))
395 assert(instr
->dest
.dest
.is_ssa
);
397 if (expr
->value
.bit_size
> 0 &&
398 instr
->dest
.dest
.ssa
.bit_size
!= expr
->value
.bit_size
)
401 state
->inexact_match
= expr
->inexact
|| state
->inexact_match
;
402 state
->has_exact_alu
= instr
->exact
|| state
->has_exact_alu
;
403 if (state
->inexact_match
&& state
->has_exact_alu
)
406 assert(!instr
->dest
.saturate
);
407 assert(nir_op_infos
[instr
->op
].num_inputs
> 0);
409 /* If we have an explicitly sized destination, we can only handle the
410 * identity swizzle. While dot(vec3(a, b, c).zxy) is a valid
411 * expression, we don't have the information right now to propagate that
412 * swizzle through. We can only properly propagate swizzles if the
413 * instruction is vectorized.
415 if (nir_op_infos
[instr
->op
].output_size
!= 0) {
416 for (unsigned i
= 0; i
< num_components
; i
++) {
422 /* If this is a commutative expression and it's one of the first few, look
423 * up its direction for the current search operation. We'll use that value
424 * to possibly flip the sources for the match.
426 unsigned comm_op_flip
=
427 (expr
->comm_expr_idx
>= 0 &&
428 expr
->comm_expr_idx
< NIR_SEARCH_MAX_COMM_OPS
) ?
429 ((state
->comm_op_direction
>> expr
->comm_expr_idx
) & 1) : 0;
432 for (unsigned i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++) {
433 /* 2src_commutative instructions that have 3 sources are only commutative
434 * in the first two sources. Source 2 is always source 2.
436 if (!match_value(expr
->srcs
[i
], instr
,
437 i
< 2 ? i
^ comm_op_flip
: i
,
438 num_components
, swizzle
, state
)) {
448 replace_bitsize(const nir_search_value
*value
, unsigned search_bitsize
,
449 struct match_state
*state
)
451 if (value
->bit_size
> 0)
452 return value
->bit_size
;
453 if (value
->bit_size
< 0)
454 return nir_src_bit_size(state
->variables
[-value
->bit_size
- 1].src
);
455 return search_bitsize
;
459 construct_value(nir_builder
*build
,
460 const nir_search_value
*value
,
461 unsigned num_components
, unsigned search_bitsize
,
462 struct match_state
*state
,
465 switch (value
->type
) {
466 case nir_search_value_expression
: {
467 const nir_search_expression
*expr
= nir_search_value_as_expression(value
);
468 unsigned dst_bit_size
= replace_bitsize(value
, search_bitsize
, state
);
469 nir_op op
= nir_op_for_search_op(expr
->opcode
, dst_bit_size
);
471 if (nir_op_infos
[op
].output_size
!= 0)
472 num_components
= nir_op_infos
[op
].output_size
;
474 nir_alu_instr
*alu
= nir_alu_instr_create(build
->shader
, op
);
475 nir_ssa_dest_init(&alu
->instr
, &alu
->dest
.dest
, num_components
,
477 alu
->dest
.write_mask
= (1 << num_components
) - 1;
478 alu
->dest
.saturate
= false;
480 /* We have no way of knowing what values in a given search expression
481 * map to a particular replacement value. Therefore, if the
482 * expression we are replacing has any exact values, the entire
483 * replacement should be exact.
485 alu
->exact
= state
->has_exact_alu
|| expr
->exact
;
487 for (unsigned i
= 0; i
< nir_op_infos
[op
].num_inputs
; i
++) {
488 /* If the source is an explicitly sized source, then we need to reset
489 * the number of components to match.
491 if (nir_op_infos
[alu
->op
].input_sizes
[i
] != 0)
492 num_components
= nir_op_infos
[alu
->op
].input_sizes
[i
];
494 alu
->src
[i
] = construct_value(build
, expr
->srcs
[i
],
495 num_components
, search_bitsize
,
499 nir_builder_instr_insert(build
, &alu
->instr
);
501 assert(alu
->dest
.dest
.ssa
.index
==
502 util_dynarray_num_elements(state
->states
, uint16_t));
503 util_dynarray_append(state
->states
, uint16_t, 0);
504 nir_algebraic_automaton(&alu
->instr
, state
->states
, state
->pass_op_table
);
507 val
.src
= nir_src_for_ssa(&alu
->dest
.dest
.ssa
);
510 memcpy(val
.swizzle
, identity_swizzle
, sizeof val
.swizzle
);
515 case nir_search_value_variable
: {
516 const nir_search_variable
*var
= nir_search_value_as_variable(value
);
517 assert(state
->variables_seen
& (1 << var
->variable
));
519 nir_alu_src val
= { NIR_SRC_INIT
};
520 nir_alu_src_copy(&val
, &state
->variables
[var
->variable
],
521 (void *)build
->shader
);
522 assert(!var
->is_constant
);
524 for (unsigned i
= 0; i
< NIR_MAX_VEC_COMPONENTS
; i
++)
525 val
.swizzle
[i
] = state
->variables
[var
->variable
].swizzle
[var
->swizzle
[i
]];
530 case nir_search_value_constant
: {
531 const nir_search_constant
*c
= nir_search_value_as_constant(value
);
532 unsigned bit_size
= replace_bitsize(value
, search_bitsize
, state
);
537 cval
= nir_imm_floatN_t(build
, c
->data
.d
, bit_size
);
542 cval
= nir_imm_intN_t(build
, c
->data
.i
, bit_size
);
546 cval
= nir_imm_boolN_t(build
, c
->data
.u
, bit_size
);
550 unreachable("Invalid alu source type");
553 assert(cval
->index
==
554 util_dynarray_num_elements(state
->states
, uint16_t));
555 util_dynarray_append(state
->states
, uint16_t, 0);
556 nir_algebraic_automaton(cval
->parent_instr
, state
->states
,
557 state
->pass_op_table
);
560 val
.src
= nir_src_for_ssa(cval
);
563 memset(val
.swizzle
, 0, sizeof val
.swizzle
);
569 unreachable("Invalid search value type");
573 UNUSED
static void dump_value(const nir_search_value
*val
)
576 case nir_search_value_constant
: {
577 const nir_search_constant
*sconst
= nir_search_value_as_constant(val
);
578 switch (sconst
->type
) {
580 fprintf(stderr
, "%f", sconst
->data
.d
);
583 fprintf(stderr
, "%"PRId64
, sconst
->data
.i
);
586 fprintf(stderr
, "0x%"PRIx64
, sconst
->data
.u
);
589 fprintf(stderr
, "%s", sconst
->data
.u
!= 0 ? "True" : "False");
592 unreachable("bad const type");
597 case nir_search_value_variable
: {
598 const nir_search_variable
*var
= nir_search_value_as_variable(val
);
599 if (var
->is_constant
)
600 fprintf(stderr
, "#");
601 fprintf(stderr
, "%c", var
->variable
+ 'a');
605 case nir_search_value_expression
: {
606 const nir_search_expression
*expr
= nir_search_value_as_expression(val
);
607 fprintf(stderr
, "(");
609 fprintf(stderr
, "~");
610 switch (expr
->opcode
) {
612 case nir_search_op_##n: fprintf(stderr, #n); break;
622 fprintf(stderr
, "%s", nir_op_infos
[expr
->opcode
].name
);
625 unsigned num_srcs
= 1;
626 if (expr
->opcode
<= nir_last_opcode
)
627 num_srcs
= nir_op_infos
[expr
->opcode
].num_inputs
;
629 for (unsigned i
= 0; i
< num_srcs
; i
++) {
630 fprintf(stderr
, " ");
631 dump_value(expr
->srcs
[i
]);
634 fprintf(stderr
, ")");
639 if (val
->bit_size
> 0)
640 fprintf(stderr
, "@%d", val
->bit_size
);
644 nir_replace_instr(nir_builder
*build
, nir_alu_instr
*instr
,
645 struct hash_table
*range_ht
,
646 struct util_dynarray
*states
,
647 const struct per_op_table
*pass_op_table
,
648 const nir_search_expression
*search
,
649 const nir_search_value
*replace
)
651 uint8_t swizzle
[NIR_MAX_VEC_COMPONENTS
] = { 0 };
653 for (unsigned i
= 0; i
< instr
->dest
.dest
.ssa
.num_components
; ++i
)
656 assert(instr
->dest
.dest
.is_ssa
);
658 struct match_state state
;
659 state
.inexact_match
= false;
660 state
.has_exact_alu
= false;
661 state
.range_ht
= range_ht
;
662 state
.pass_op_table
= pass_op_table
;
664 STATIC_ASSERT(sizeof(state
.comm_op_direction
) * 8 >= NIR_SEARCH_MAX_COMM_OPS
);
666 unsigned comm_expr_combinations
=
667 1 << MIN2(search
->comm_exprs
, NIR_SEARCH_MAX_COMM_OPS
);
670 for (unsigned comb
= 0; comb
< comm_expr_combinations
; comb
++) {
671 /* The bitfield of directions is just the current iteration. Hooray for
674 state
.comm_op_direction
= comb
;
675 state
.variables_seen
= 0;
677 if (match_expression(search
, instr
,
678 instr
->dest
.dest
.ssa
.num_components
,
688 fprintf(stderr
, "matched: ");
689 dump_value(&search
->value
);
690 fprintf(stderr
, " -> ");
692 fprintf(stderr
, " ssa_%d\n", instr
->dest
.dest
.ssa
.index
);
695 build
->cursor
= nir_before_instr(&instr
->instr
);
697 state
.states
= states
;
699 nir_alu_src val
= construct_value(build
, replace
,
700 instr
->dest
.dest
.ssa
.num_components
,
701 instr
->dest
.dest
.ssa
.bit_size
,
702 &state
, &instr
->instr
);
704 /* Note that NIR builder will elide the MOV if it's a no-op, which may
705 * allow more work to be done in a single pass through algebraic.
707 nir_ssa_def
*ssa_val
=
708 nir_mov_alu(build
, val
, instr
->dest
.dest
.ssa
.num_components
);
709 if (ssa_val
->index
== util_dynarray_num_elements(states
, uint16_t)) {
710 util_dynarray_append(states
, uint16_t, 0);
711 nir_algebraic_automaton(ssa_val
->parent_instr
, states
, pass_op_table
);
714 nir_ssa_def_rewrite_uses(&instr
->dest
.dest
.ssa
, nir_src_for_ssa(ssa_val
));
716 /* We know this one has no more uses because we just rewrote them all,
717 * so we can remove it. The rest of the matched expression, however, we
718 * don't know so much about. We'll just let dead code clean them up.
720 nir_instr_remove(&instr
->instr
);
726 nir_algebraic_automaton(nir_instr
*instr
, struct util_dynarray
*states
,
727 const struct per_op_table
*pass_op_table
)
729 switch (instr
->type
) {
730 case nir_instr_type_alu
: {
731 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
733 uint16_t search_op
= nir_search_op_for_nir_op(op
);
734 const struct per_op_table
*tbl
= &pass_op_table
[search_op
];
735 if (tbl
->num_filtered_states
== 0)
738 /* Calculate the index into the transition table. Note the index
739 * calculated must match the iteration order of Python's
740 * itertools.product(), which was used to emit the transition
744 for (unsigned i
= 0; i
< nir_op_infos
[op
].num_inputs
; i
++) {
745 index
*= tbl
->num_filtered_states
;
746 index
+= tbl
->filter
[*util_dynarray_element(states
, uint16_t,
747 alu
->src
[i
].src
.ssa
->index
)];
749 *util_dynarray_element(states
, uint16_t, alu
->dest
.dest
.ssa
.index
) =
754 case nir_instr_type_load_const
: {
755 nir_load_const_instr
*load_const
= nir_instr_as_load_const(instr
);
756 *util_dynarray_element(states
, uint16_t, load_const
->def
.index
) =
767 nir_algebraic_instr(nir_builder
*build
, nir_instr
*instr
,
768 struct hash_table
*range_ht
,
769 const bool *condition_flags
,
770 const struct transform
**transforms
,
771 const uint16_t *transform_counts
,
772 struct util_dynarray
*states
,
773 const struct per_op_table
*pass_op_table
)
776 if (instr
->type
!= nir_instr_type_alu
)
779 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
780 if (!alu
->dest
.dest
.is_ssa
)
783 unsigned bit_size
= alu
->dest
.dest
.ssa
.bit_size
;
784 const unsigned execution_mode
=
785 build
->shader
->info
.float_controls_execution_mode
;
786 const bool ignore_inexact
=
787 nir_is_float_control_signed_zero_inf_nan_preserve(execution_mode
, bit_size
) ||
788 nir_is_denorm_flush_to_zero(execution_mode
, bit_size
);
790 int xform_idx
= *util_dynarray_element(states
, uint16_t,
791 alu
->dest
.dest
.ssa
.index
);
792 for (uint16_t i
= 0; i
< transform_counts
[xform_idx
]; i
++) {
793 const struct transform
*xform
= &transforms
[xform_idx
][i
];
794 if (condition_flags
[xform
->condition_offset
] &&
795 !(xform
->search
->inexact
&& ignore_inexact
) &&
796 nir_replace_instr(build
, alu
, range_ht
, states
, pass_op_table
,
797 xform
->search
, xform
->replace
)) {
798 _mesa_hash_table_clear(range_ht
, NULL
);
807 nir_algebraic_impl(nir_function_impl
*impl
,
808 const bool *condition_flags
,
809 const struct transform
**transforms
,
810 const uint16_t *transform_counts
,
811 const struct per_op_table
*pass_op_table
)
813 bool progress
= false;
816 nir_builder_init(&build
, impl
);
818 /* Note: it's important here that we're allocating a zeroed array, since
819 * state 0 is the default state, which means we don't have to visit
820 * anything other than constants and ALU instructions.
822 struct util_dynarray states
= {0};
823 if (!util_dynarray_resize(&states
, uint16_t, impl
->ssa_alloc
))
825 memset(states
.data
, 0, states
.size
);
827 struct hash_table
*range_ht
= _mesa_pointer_hash_table_create(NULL
);
829 nir_foreach_block(block
, impl
) {
830 nir_foreach_instr(instr
, block
) {
831 nir_algebraic_automaton(instr
, &states
, pass_op_table
);
835 nir_foreach_block_reverse(block
, impl
) {
836 nir_foreach_instr_reverse_safe(instr
, block
) {
837 progress
|= nir_algebraic_instr(&build
, instr
,
838 range_ht
, condition_flags
,
839 transforms
, transform_counts
, &states
,
844 ralloc_free(range_ht
);
845 util_dynarray_fini(&states
);
848 nir_metadata_preserve(impl
, nir_metadata_block_index
|
849 nir_metadata_dominance
);
852 impl
->valid_metadata
&= ~nir_metadata_not_properly_reset
;