nir: add support for flushing to zero denorm constants
[mesa.git] / src / compiler / nir / nir_opt_constant_folding.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir_constant_expressions.h"
#include <math.h>

/*
 * Implements SSA-based constant folding.
 */

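/* Fold a single ALU instruction: if every source comes from a load_const,
 * evaluate the opcode at compile time, emit a new load_const holding the
 * result, and rewrite all uses of the old destination to point at it.
 */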
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx, unsigned execution_mode)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS][NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* If any of the inputs or outputs has an unsized type, we need to guess
    * the bit-size. In that case, the validator ensures that all bit-sizes
    * match, so we can just take the bit-size from the first input or output
    * with an unsized type. If all the inputs and outputs are sized, we don't
    * need to guess at all, because the code generated for constant opcodes
    * already knows the sizes of the types involved and doesn't use the
    * provided bit-size for anything (although it still has to be passed a
    * valid one).
    */
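   /* nir_op_iadd, for instance, declares the unsized nir_type_int for both
    * its sources and its destination, so its bit-size has to come from the
    * SSA values themselves.
    */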
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i]))
         bit_size = instr->src[i].src.ssa->bit_size;

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         src[i][j] = load_const->value[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
   nir_const_value *srcs[NIR_MAX_VEC_COMPONENTS];
   memset(dest, 0, sizeof(dest));
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; ++i)
      srcs[i] = src[i];
   nir_eval_const_opcode(instr->op, dest, instr->dest.dest.ssa.num_components,
                         bit_size, srcs, execution_mode);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   memcpy(new_instr->value, dest,
          sizeof(*new_instr->value) * new_instr->def.num_components);

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}

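/* Fold demote_if/discard_if intrinsics whose condition is a constant: a true
 * condition becomes an unconditional demote/discard, while a false condition
 * makes the intrinsic a no-op that can simply be removed.
 */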
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   if ((instr->intrinsic == nir_intrinsic_demote_if ||
        instr->intrinsic == nir_intrinsic_discard_if) &&
       nir_src_is_const(instr->src[0])) {
      if (nir_src_as_bool(instr->src[0])) {
         /* This method of getting a nir_shader * from a nir_instr is
          * admittedly gross, but given the rarity of hitting this case I think
          * it's preferable to plumbing an otherwise unused nir_shader *
          * parameter through four functions to get here.
          */
         nir_cf_node *cf_node = &instr->instr.block->cf_node;
         nir_function_impl *impl = nir_cf_node_get_function(cf_node);
         nir_shader *shader = impl->function->shader;

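         /* The condition is known to be true, so replace the conditional
          * intrinsic with its unconditional counterpart.
          */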
         nir_intrinsic_op op = instr->intrinsic == nir_intrinsic_discard_if ?
                               nir_intrinsic_discard :
                               nir_intrinsic_demote;
         nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(shader, op);
         nir_instr_insert_before(&instr->instr, &new_instr->instr);
         nir_instr_remove(&instr->instr);
         progress = true;
      } else {
         /* We're not discarding; just delete the instruction. */
         nir_instr_remove(&instr->instr);
         progress = true;
      }
   }

   return progress;
}

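/* Fold every instruction in the block that we know how to fold. Uses the
 * _safe iterator because folded instructions are removed as we go.
 */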
static bool
constant_fold_block(nir_block *block, void *mem_ctx, unsigned execution_mode)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx, execution_mode);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}

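/* Run constant folding over every block in the function, then record which
 * metadata is still valid for the impl.
 */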
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl, unsigned execution_mode)
{
   void *mem_ctx = ralloc_parent(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= constant_fold_block(block, mem_ctx, execution_mode);
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
#ifndef NDEBUG
      impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
   }

   return progress;
}

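/* Entry point for the pass. The shader's float controls execution mode is
 * passed down to nir_eval_const_opcode() so that folded floating-point
 * results honor the requested denorm flush-to-zero/preserve behavior.
 */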
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;
   unsigned execution_mode = shader->info.float_controls_execution_mode;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl, execution_mode);
   }

   return progress;
}