/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "nir.h"
#include "nir_constant_expressions.h"

/*
 * Implements SSA-based constant folding.
 */
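
/* For example, an ALU instruction whose sources are all load_const
 * instructions, such as an iadd of two constants, is replaced with a single
 * load_const holding the evaluated result, and a discard_if with a constant
 * condition is either removed or turned into an unconditional discard.
 */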

struct constant_fold_state {
   nir_function_impl *impl;
};
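
/* Tries to fold a single ALU instruction: if the destination is SSA and every
 * source comes from a load_const, the instruction is replaced with a
 * load_const holding the evaluated result. Returns true on success.
 */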
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* In the case that any outputs/inputs have unsized types, then we need to
    * guess the bit-size. In this case, the validator ensures that all
    * bit-sizes match so we can just take the bit-size from the first
    * output/input with an unsized type. If all the outputs/inputs are sized
    * then we don't need to guess the bit-size at all because the code we
    * generate for constant opcodes in this case already knows the sizes of
    * the types involved and does not need the provided bit-size for anything
    * (although it still requires a valid bit-size to be passed in).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_sizes[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

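      /* Gather this source's constant components into src[i], applying the
       * swizzle and reading the channel width that matches the load_const's
       * bit size.
       */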
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         switch (load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

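   /* Every source is constant: evaluate the opcode and replace this ALU
    * instruction with a load_const holding the result.
    */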
   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
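
/* Folds a discard_if intrinsic whose condition is constant: a NIR_FALSE
 * condition makes the discard dead code and it is removed; a NIR_TRUE
 * condition turns it into an unconditional discard. Returns true if anything
 * changed.
 */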
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
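      /* A constant condition either never fires (drop the instruction) or
       * always fires (replace it with an unconditional discard).
       */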
      nir_const_value *src_val = nir_src_as_const_value(instr->src[0]);
      if (src_val && src_val->u32[0] == NIR_FALSE) {
         nir_instr_remove(&instr->instr);
         progress = true;
      } else if (src_val && src_val->u32[0] == NIR_TRUE) {
         /* This method of getting a nir_shader * from a nir_instr is
          * admittedly gross, but given the rarity of hitting this case I think
          * it's preferable to plumbing an otherwise unused nir_shader *
          * parameter through four functions to get here.
          */
         nir_cf_node *cf_node = &instr->instr.block->cf_node;
         nir_function_impl *impl = nir_cf_node_get_function(cf_node);
         nir_shader *shader = impl->function->shader;

         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(shader, nir_intrinsic_discard);
         nir_instr_insert_before(&instr->instr, &discard->instr);
         nir_instr_remove(&instr->instr);
         progress = true;
      }
   }

   return progress;
}
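
/* Runs constant folding on every instruction in a block, dispatching on the
 * instruction type. Returns true if any instruction was folded.
 */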
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}
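
/* Folds every block in a function implementation. Block-index and dominance
 * metadata are preserved, since constant folding does not change control flow.
 */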
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= constant_fold_block(block, mem_ctx);
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl);
   }

   return progress;
}