/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
27
28 #include "nir_constant_expressions.h"
29 #include <math.h>
30
31 /*
32 * Implements SSA-based constant folding.
33 */
34
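/*
 * Two kinds of folding happen here:
 *
 *  1. ALU instructions whose sources are all load_const are evaluated at
 *     compile time and replaced with a single load_const of the result.
 *
 *  2. Indirect array derefs (on intrinsic variables and texture samplers)
 *     whose index is a load_const are converted into direct derefs by
 *     folding the constant index into the base offset.
 */
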
struct constant_fold_state {
   void *mem_ctx;
   nir_function_impl *impl;
   bool progress;
};

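/*
 * Replaces an ALU instruction whose sources are all constants with a
 * load_const of the result computed by nir_eval_const_opcode().
 */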
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[4];

   if (!instr->dest.dest.is_ssa)
      return false;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;

      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

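      /* Gather this source's constant components, applying the swizzle so
       * that src[i] holds the values in the order the opcode consumes them.
       */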
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest =
      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
                            src);

   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components);

   new_instr->value = dest;

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}

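/*
 * Walks a variable deref chain and converts any indirect array deref whose
 * index is a constant into a direct deref.  Returns true if anything was
 * folded.
 */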
static bool
constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
{
   bool progress = false;

   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail);

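      /* An indirect deref with a constant SSA index is really a direct
       * deref in disguise: fold the index into base_offset.
       */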
      if (arr->deref_array_type == nir_deref_array_type_indirect &&
          arr->indirect.is_ssa &&
          arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *indirect =
            nir_instr_as_load_const(arr->indirect.ssa->parent_instr);

         arr->base_offset += indirect->value.u[0];

         /* Clear out the source */
         nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));

         arr->deref_array_type = nir_deref_array_type_direct;

         progress = true;
      }
   }

   return progress;
}

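/*
 * Folds constant indirects in the variable derefs of an intrinsic
 * (e.g. load_var/store_var).
 */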
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
   }

   return progress;
}

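/*
 * Folds constant indirects in a texture instruction's sampler deref, if it
 * has one.
 */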
static bool
constant_fold_tex_instr(nir_tex_instr *instr)
{
   if (instr->sampler)
      return constant_fold_deref(&instr->instr, instr->sampler);
   else
      return false;
}

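/*
 * Per-block worker for nir_foreach_block(): dispatches each instruction to
 * the appropriate folding helper.  Always returns true so the walk visits
 * every block.
 */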
static bool
constant_fold_block(nir_block *block, void *void_state)
{
   struct constant_fold_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_alu:
         state->progress |= constant_fold_alu_instr(nir_instr_as_alu(instr),
                                                    state->mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         state->progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         state->progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return true;
}

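/*
 * Runs constant folding on a single function implementation.  The rewrite
 * never adds or removes blocks or changes control flow, so block-index and
 * dominance metadata are preserved when progress is made.
 */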
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
   struct constant_fold_state state;

   state.mem_ctx = ralloc_parent(impl);
   state.impl = impl;
   state.progress = false;

   nir_foreach_block(impl, constant_fold_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   return state.progress;
}

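/*
 * Entry point.  A sketch of typical usage (not part of this file): drivers
 * generally run this pass in an optimization loop alongside other passes
 * such as nir_opt_dce() until nothing makes progress, e.g.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_constant_folding(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */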
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(shader, function) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl);
   }

   return progress;
}