nir: Transform discard_if(true) into discard
src/compiler/nir/nir_opt_constant_folding.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir_constant_expressions.h"
#include <math.h>

/*
 * Implements SSA-based constant folding.
 */

struct constant_fold_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};

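/* Try to fold a single ALU instruction whose sources are all load_const
 * instructions: evaluate the opcode at compile time and replace the ALU
 * instruction with a load_const holding the result.  Returns true if the
 * instruction was folded.
 */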
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[4];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* If any inputs or outputs have unsized types, we need to guess the
    * bit-size.  The validator ensures that all bit-sizes match, so we can
    * just take the bit-size from the first input/output with an unsized
    * type.  If all inputs and outputs are sized, we don't need to guess at
    * all: the code generated for constant opcodes in that case already
    * knows the sizes of the types involved and doesn't need the provided
    * bit-size for anything (although it must still be passed a valid
    * bit-size).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i])) {
         bit_size = instr->src[i].src.ssa->bit_size;
      }

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

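      /* Gather this source's components through its swizzle, reading at
       * the bit size of the constant being loaded.
       */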
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         switch (load_const->def.bit_size) {
         case 64:
            src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
            break;
         case 32:
            src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
            break;
         case 16:
            src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
            break;
         case 8:
            src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
            break;
         }
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            bit_size, src);

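   /* Replace the ALU instruction with a load_const holding the folded
    * result, then rewrite all uses to point at the new value.
    */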
   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}

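/* Walk a variable dereference chain and turn any indirect array deref
 * whose index is a constant into a direct array deref by folding the
 * index into base_offset.
 */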
static bool
constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
{
   bool progress = false;

   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail);

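      /* If the index is a constant, fold it into base_offset, drop the
       * now-unused indirect source, and mark the deref direct.
       */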
      if (arr->deref_array_type == nir_deref_array_type_indirect &&
          arr->indirect.is_ssa &&
          arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *indirect =
            nir_instr_as_load_const(arr->indirect.ssa->parent_instr);

         arr->base_offset += indirect->value.u32[0];

         /* Clear out the source */
         nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));

         arr->deref_array_type = nir_deref_array_type_direct;

         progress = true;
      }
   }

   return progress;
}

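/* Fold the variable derefs of an intrinsic and simplify discard_if when
 * its condition is a constant: discard_if(false) is a no-op and can be
 * removed, while discard_if(true) becomes an unconditional discard.
 */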
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
   }

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      nir_const_value *src_val = nir_src_as_const_value(instr->src[0]);
      if (src_val && src_val->u32[0] == NIR_FALSE) {
         nir_instr_remove(&instr->instr);
         progress = true;
      } else if (src_val && src_val->u32[0] == NIR_TRUE) {
         /* This method of getting a nir_shader * from a nir_instr is
          * admittedly gross, but given the rarity of hitting this case,
          * it's preferable to plumbing an otherwise-unused nir_shader *
          * parameter through four functions to get here.
          */
         nir_cf_node *cf_node = &instr->instr.block->cf_node;
         nir_function_impl *impl = nir_cf_node_get_function(cf_node);
         nir_shader *shader = impl->function->shader;

         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(shader, nir_intrinsic_discard);
         nir_instr_insert_before(&instr->instr, &discard->instr);
         nir_instr_remove(&instr->instr);
         progress = true;
      }
   }

   return progress;
}

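/* Fold the texture and sampler derefs of a texture instruction. */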
static bool
constant_fold_tex_instr(nir_tex_instr *instr)
{
   bool progress = false;

   if (instr->texture)
      progress |= constant_fold_deref(&instr->instr, instr->texture);

   if (instr->sampler)
      progress |= constant_fold_deref(&instr->instr, instr->sampler);

   return progress;
}

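/* Constant-fold every instruction in a block, dispatching on instruction
 * type.  The _safe iterator is required because folding removes
 * instructions as it goes.
 */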
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}

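/* Run constant folding over a single function implementation.  Folding
 * never changes the CFG, so block indices and dominance information are
 * preserved.
 */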
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= constant_fold_block(block, mem_ctx);
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return progress;
}

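/* Pass entry point: run constant folding over every function in the
 * shader.  Returns true if anything was folded.
 */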
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl);
   }

   return progress;
}