nir: Make load_const SSA-only
src/glsl/nir/nir_lower_io.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

/*
 * This lowering pass replaces loads and stores of input/output variables
 * with the corresponding input/output intrinsics.
 *
 * NOTE: This pass really only works for scalar backends at the moment due
 * to the way it packs the input/output data.
 */

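/*
 * As a sketch of the transformation (the IR spellings below are
 * illustrative, not exact printed NIR):
 *
 *    load_var on an input variable at driver_location 7
 *       -> a load_input intrinsic with const_index[0] == 7
 *    load_var on in[i] (an indirect array access)
 *       -> load_input_indirect with the constant part of the offset in
 *          const_index[0] and i * type_size(element) as an SSA source
 *    store_var on an output variable
 *       -> store_output / store_output_indirect, analogously
 */
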
#include "nir.h"

struct lower_io_state {
   void *mem_ctx;
};

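/* Returns the number of scalar slots a variable of the given type occupies,
 * counting each array element and struct field separately.  Opaque types
 * (samplers, images, atomic counters, interface blocks) take up no space.
 */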
static unsigned
type_size(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return glsl_get_components(type);
   case GLSL_TYPE_ARRAY:
      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < glsl_get_length(type); i++) {
         size += type_size(glsl_get_struct_field(type, i));
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_INTERFACE:
      return 0;
   case GLSL_TYPE_IMAGE:
      return 0;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
      unreachable("not reached");
   }

   return 0;
}

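/* Assigns each variable in the hash table a packed driver_location; on
 * return, *size holds the total number of slots used.
 */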
static void
assign_var_locations(struct hash_table *ht, unsigned *size)
{
   unsigned location = 0;

   struct hash_entry *entry;
   hash_table_foreach(ht, entry) {
      nir_variable *var = (nir_variable *) entry->data;

      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (var->data.mode == nir_var_uniform && var->interface_type != NULL)
         continue;

      var->data.driver_location = location;
      location += type_size(var->type);
   }

   *size = location;
}

static void
assign_var_locations_shader(nir_shader *shader)
{
   assign_var_locations(shader->inputs, &shader->num_inputs);
   assign_var_locations(shader->outputs, &shader->num_outputs);
   assign_var_locations(shader->uniforms, &shader->num_uniforms);
}

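/* Returns true if any array dereference in the chain has a non-constant
 * index.
 */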
static bool
deref_has_indirect(nir_deref_var *deref)
{
   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *arr = nir_deref_as_array(tail);
         if (arr->deref_array_type == nir_deref_array_type_indirect)
            return true;
      }
   }

   return false;
}

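/* Walks the dereference chain and returns the constant part of the offset.
 * For each indirect array dereference, it emits an imul of the index with
 * the element size (plus an iadd to combine multiple indirects) before
 * the given instruction and stores the resulting SSA value in *indirect.
 * *indirect is only written if an indirect dereference is found.
 */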
static unsigned
get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
              struct lower_io_state *state)
{
   bool found_indirect = false;
   unsigned base_offset = 0;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         base_offset += size * deref_array->base_offset;

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_load_const_instr *load_const =
               nir_load_const_instr_create(state->mem_ctx, 1);
            load_const->value.u[0] = size;
            nir_instr_insert_before(instr, &load_const->instr);

            nir_alu_instr *mul = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_imul);
            mul->src[0].src.is_ssa = true;
            mul->src[0].src.ssa = &load_const->def;
            mul->src[1].src = nir_src_copy(deref_array->indirect,
                                           state->mem_ctx);
            mul->dest.write_mask = 1;
            mul->dest.dest.is_ssa = true;
            nir_ssa_def_init(&mul->instr, &mul->dest.dest.ssa, 1, NULL);
            nir_instr_insert_before(instr, &mul->instr);

            if (found_indirect) {
               nir_alu_instr *add = nir_alu_instr_create(state->mem_ctx,
                                                         nir_op_iadd);
               add->src[0].src = *indirect;
               add->src[1].src.is_ssa = true;
               add->src[1].src.ssa = &mul->dest.dest.ssa;
               add->dest.write_mask = 1;
               add->dest.dest.is_ssa = true;
               nir_ssa_def_init(&add->instr, &add->dest.dest.ssa, 1, NULL);
               nir_instr_insert_before(instr, &add->instr);

               indirect->is_ssa = true;
               indirect->ssa = &add->dest.dest.ssa;
            } else {
               indirect->is_ssa = true;
               indirect->ssa = &mul->dest.dest.ssa;
               found_indirect = true;
            }
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         for (unsigned i = 0; i < deref_struct->index; i++)
            base_offset += type_size(glsl_get_struct_field(parent_type, i));
      }
   }

   return base_offset;
}

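/* Rewrites every load_var of an input/uniform and every store_var of an
 * output in the block into the corresponding load/store intrinsic.
 */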
static bool
nir_lower_io_block(nir_block *block, void *void_state)
{
   struct lower_io_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         nir_variable_mode mode = intrin->variables[0]->var->data.mode;
         if (mode != nir_var_shader_in && mode != nir_var_uniform)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         /* Figure out the opcode */
         nir_intrinsic_op load_op;
         switch (mode) {
         case nir_var_shader_in:
            load_op = has_indirect ? nir_intrinsic_load_input_indirect :
                                     nir_intrinsic_load_input;
            break;
         case nir_var_uniform:
            load_op = has_indirect ? nir_intrinsic_load_uniform_indirect :
                                     nir_intrinsic_load_uniform;
            break;
         default:
            unreachable("Unknown variable mode");
         }

         nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx,
                                                                load_op);
         load->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         load->const_index[0] = offset;
         load->const_index[1] = 1;

         if (has_indirect)
            load->src[0] = indirect;

         if (intrin->dest.is_ssa) {
            load->dest.is_ssa = true;
            nir_ssa_def_init(&load->instr, &load->dest.ssa,
                             intrin->num_components, NULL);

            nir_src new_src = {
               .is_ssa = true,
               .ssa = &load->dest.ssa,
            };

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_src,
                                     state->mem_ctx);
         } else {
            load->dest = nir_dest_copy(intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      case nir_intrinsic_store_var: {
         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         nir_intrinsic_op store_op;
         if (has_indirect) {
            store_op = nir_intrinsic_store_output_indirect;
         } else {
            store_op = nir_intrinsic_store_output;
         }

         nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx,
                                                                 store_op);
         store->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         store->const_index[0] = offset;
         store->const_index[1] = 1;

         store->src[0] = nir_src_copy(intrin->src[0], state->mem_ctx);

         if (has_indirect)
            store->src[1] = indirect;

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}

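/* Lowers all I/O in a single function implementation.  The pass does not
 * touch the CFG, so block indices and dominance information are preserved.
 */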
static void
nir_lower_io_impl(nir_function_impl *impl)
{
   struct lower_io_state state;

   state.mem_ctx = ralloc_parent(impl);

   nir_foreach_block(impl, nir_lower_io_block, &state);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}

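/* Entry point: assigns driver locations for all inputs, outputs, and
 * uniforms, then lowers variable access to I/O intrinsics in every
 * function that has an implementation.
 */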
void
nir_lower_io(nir_shader *shader)
{
   assign_var_locations_shader(shader);

   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         nir_lower_io_impl(overload->impl);
   }
}