/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_ubo_reference.cpp
 *
 * IR lower pass to replace dereferences of variables in a uniform
 * buffer object with usage of ir_binop_ubo_load expressions, each of
 * which can read data up to the size of a vec4.
 *
 * This relieves drivers of the responsibility to deal with tricky UBO
 * layout issues like std140 structures and row_major matrices on
 * their own.
 */
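
/*
 * Illustrative example (offsets assume a std140 layout in which the
 * hypothetical member "m" below starts at byte 16 of the block):
 *
 *    layout(std140) uniform Block { vec4 a; mat4 m; };
 *    ... = m[1];
 *
 * The read of m[1] becomes an ir_binop_ubo_load of a vec4 from Block
 * at byte offset 16 (start of m) + 1 * 16 (std140 column stride),
 * assigned into a "ubo_load_temp" temporary that then replaces the
 * original dereference.
 */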

#include "ir.h"
#include "ir_builder.h"
#include "ir_rvalue_visitor.h"
#include "main/macros.h"

using namespace ir_builder;

namespace {
class lower_ubo_reference_visitor : public ir_rvalue_enter_visitor {
public:
   lower_ubo_reference_visitor(struct gl_shader *shader)
   : shader(shader)
   {
   }

   void handle_rvalue(ir_rvalue **rvalue);
   void emit_ubo_loads(ir_dereference *deref, ir_variable *base_offset,
                       unsigned int deref_offset);
   ir_expression *ubo_load(const struct glsl_type *type,
                           ir_rvalue *offset);

   void *mem_ctx;
   struct gl_shader *shader;
   struct gl_uniform_buffer_variable *ubo_var;
   unsigned uniform_block;
   bool progress;
};

/**
 * Determine the name of the interface block field
 *
 * This is the name of the specific member as it would appear in the
 * \c gl_uniform_buffer_variable::Name field in the shader's
 * \c UniformBlocks array.
 */
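/*
 * For example, given
 *
 *    uniform Block { vec4 v; } instances[4];
 *
 * a dereference of instances[2].v yields the name "Block[2]",
 * matching the per-element block names recorded in the shader's
 * UniformBlocks array.
 */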
static const char *
interface_field_name(void *mem_ctx, char *base_name, ir_dereference *d)
{
   ir_constant *previous_index = NULL;

   while (d != NULL) {
      switch (d->ir_type) {
      case ir_type_dereference_variable: {
         ir_dereference_variable *v = (ir_dereference_variable *) d;
         if (previous_index
             && v->var->is_interface_instance()
             && v->var->type->is_array())
            return ralloc_asprintf(mem_ctx,
                                   "%s[%d]",
                                   base_name,
                                   previous_index->get_uint_component(0));
         else
            return base_name;

         break;
      }

      case ir_type_dereference_record: {
         ir_dereference_record *r = (ir_dereference_record *) d;

         d = r->record->as_dereference();
         break;
      }

      case ir_type_dereference_array: {
         ir_dereference_array *a = (ir_dereference_array *) d;

         d = a->array->as_dereference();
         previous_index = a->array_index->as_constant();
         break;
      }

      default:
         assert(!"Should not get here.");
         break;
      }
   }

   assert(!"Should not get here.");
   return NULL;
}

void
lower_ubo_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   if (!*rvalue)
      return;

   ir_dereference *deref = (*rvalue)->as_dereference();
   if (!deref)
      return;

   ir_variable *var = deref->variable_referenced();
   if (!var || !var->is_in_uniform_block())
      return;

   mem_ctx = ralloc_parent(*rvalue);

   const char *const field_name =
      interface_field_name(mem_ctx, (char *) var->get_interface_type()->name,
                           deref);

   this->uniform_block = -1;
   for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
      if (strcmp(field_name, shader->UniformBlocks[i].Name) == 0) {
         this->uniform_block = i;

         struct gl_uniform_block *block = &shader->UniformBlocks[i];

         this->ubo_var = var->is_interface_instance()
            ? &block->Uniforms[0] : &block->Uniforms[var->data.location];

         break;
      }
   }

   assert(this->uniform_block != (unsigned) -1);

   ir_rvalue *offset = new(mem_ctx) ir_constant(0u);
   unsigned const_offset = 0;
   bool row_major = ubo_var->RowMajor;

   /* Calculate the offset to the start of the region of the UBO
    * dereferenced by *rvalue. This may be a variable offset if an
    * array dereference has a variable index.
    */
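   /* For example, constant struct and array indices only add to
    * const_offset, while a non-constant array index adds an
    * (index * array_stride) term to the runtime offset expression.
    */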
   while (deref) {
      switch (deref->ir_type) {
      case ir_type_dereference_variable: {
         const_offset += ubo_var->Offset;
         deref = NULL;
         break;
      }

      case ir_type_dereference_array: {
         ir_dereference_array *deref_array = (ir_dereference_array *)deref;
         unsigned array_stride;
         if (deref_array->array->type->is_matrix() && row_major) {
            /* When loading a vector out of a row major matrix, the
             * step between the columns (vectors) is the size of a
             * float, while the step between the rows (elements of a
             * vector) is handled below in emit_ubo_loads.
             */
            array_stride = 4;
         } else if (deref_array->type->is_interface()) {
            /* We're processing an array dereference of an interface instance
             * array. The thing being dereferenced *must* be a variable
             * dereference because interfaces cannot be embedded in other
             * types. In terms of calculating the offsets for the lowering
             * pass, we don't care about the array index. All elements of an
             * interface instance array will have the same offsets relative to
             * the base of the block that backs them.
             */
            assert(deref_array->array->as_dereference_variable());
            deref = deref_array->array->as_dereference();
            break;
         } else {
            array_stride = deref_array->type->std140_size(row_major);
            array_stride = glsl_align(array_stride, 16);
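            /* Note that std140 rounds array strides up to a vec4, so
             * e.g. a float[] member is stepped through 16 bytes per
             * element here rather than 4.
             */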
         }

         ir_rvalue *array_index = deref_array->array_index;
         if (array_index->type->base_type == GLSL_TYPE_INT)
            array_index = i2u(array_index);

         ir_constant *const_index = array_index->as_constant();
         if (const_index) {
            const_offset += array_stride * const_index->value.u[0];
         } else {
            offset = add(offset,
                         mul(array_index,
                             new(mem_ctx) ir_constant(array_stride)));
         }
         deref = deref_array->array->as_dereference();
         break;
      }

      case ir_type_dereference_record: {
         ir_dereference_record *deref_record = (ir_dereference_record *)deref;
         const glsl_type *struct_type = deref_record->record->type;
         unsigned intra_struct_offset = 0;

         unsigned max_field_align = 16;
         for (unsigned int i = 0; i < struct_type->length; i++) {
            const glsl_type *type = struct_type->fields.structure[i].type;
            unsigned field_align = type->std140_base_alignment(row_major);
            max_field_align = MAX2(field_align, max_field_align);
            intra_struct_offset = glsl_align(intra_struct_offset, field_align);

            if (strcmp(struct_type->fields.structure[i].name,
                       deref_record->field) == 0)
               break;
            intra_struct_offset += type->std140_size(row_major);
         }

         const_offset = glsl_align(const_offset, max_field_align);
         const_offset += intra_struct_offset;

         deref = deref_record->record->as_dereference();
         break;
      }
      default:
         assert(!"not reached");
         deref = NULL;
         break;
      }
   }

   /* Now that we've calculated the offset to the start of the
    * dereference, walk over the type and emit loads into a temporary.
    */
   const glsl_type *type = (*rvalue)->type;
   ir_variable *load_var = new(mem_ctx) ir_variable(type,
                                                    "ubo_load_temp",
                                                    ir_var_temporary);
   base_ir->insert_before(load_var);

   ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                       "ubo_load_temp_offset",
                                                       ir_var_temporary);
   base_ir->insert_before(load_offset);
   base_ir->insert_before(assign(load_offset, offset));

   deref = new(mem_ctx) ir_dereference_variable(load_var);
   emit_ubo_loads(deref, load_offset, const_offset);
   *rvalue = deref;

   progress = true;
}

ir_expression *
lower_ubo_reference_visitor::ubo_load(const glsl_type *type,
                                      ir_rvalue *offset)
{
   return new(mem_ctx)
      ir_expression(ir_binop_ubo_load,
                    type,
                    new(mem_ctx) ir_constant(this->uniform_block),
                    offset);

}

/**
 * Takes LHS and emits a series of assignments into its components
 * from the UBO variable at variable_offset + deref_offset.
 *
 * Recursively calls itself to break the deref down to the point that
 * the ir_binop_ubo_load expressions generated are contiguous scalars
 * or vectors.
 */
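/*
 * For example, a column-major mat4 member is broken into four vec4
 * ir_binop_ubo_load expressions at deref_offset + 0, 16, 32 and 48,
 * one per column, since std140 rounds the column stride up to a vec4.
 */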
void
lower_ubo_reference_visitor::emit_ubo_loads(ir_dereference *deref,
                                            ir_variable *base_offset,
                                            unsigned int deref_offset)
{
   if (deref->type->is_record()) {
      unsigned int field_offset = 0;

      for (unsigned i = 0; i < deref->type->length; i++) {
         const struct glsl_struct_field *field =
            &deref->type->fields.structure[i];
         ir_dereference *field_deref =
            new(mem_ctx) ir_dereference_record(deref->clone(mem_ctx, NULL),
                                               field->name);

         field_offset =
            glsl_align(field_offset,
                       field->type->std140_base_alignment(ubo_var->RowMajor));

         emit_ubo_loads(field_deref, base_offset, deref_offset + field_offset);

         field_offset += field->type->std140_size(ubo_var->RowMajor);
      }
      return;
   }

   if (deref->type->is_array()) {
      unsigned array_stride =
         glsl_align(deref->type->fields.array->std140_size(ubo_var->RowMajor),
                    16);

      for (unsigned i = 0; i < deref->type->length; i++) {
         ir_constant *element = new(mem_ctx) ir_constant(i);
         ir_dereference *element_deref =
            new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL),
                                              element);
         emit_ubo_loads(element_deref, base_offset,
                        deref_offset + i * array_stride);
      }
      return;
   }

   if (deref->type->is_matrix()) {
      for (unsigned i = 0; i < deref->type->matrix_columns; i++) {
         ir_constant *col = new(mem_ctx) ir_constant(i);
         ir_dereference *col_deref =
            new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL),
                                              col);

         /* std140 always rounds the stride of arrays (and matrices)
          * up to a vec4, so the stride between matrix columns/rows is
          * always 16 bytes.
          */
         emit_ubo_loads(col_deref, base_offset, deref_offset + i * 16);
      }
      return;
   }

   assert(deref->type->is_scalar() ||
          deref->type->is_vector());

   if (!ubo_var->RowMajor) {
      ir_rvalue *offset = add(base_offset,
                              new(mem_ctx) ir_constant(deref_offset));
      base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                    ubo_load(deref->type, offset)));
   } else {
      /* We're dereffing a column out of a row-major matrix, so we
       * gather the vector from each stored row.
       */
      assert(deref->type->base_type == GLSL_TYPE_FLOAT);
      /* Matrices, row_major or not, are stored as if they were
       * arrays of vectors of the appropriate size in std140.
       * Arrays have their strides rounded up to a vec4, so the
       * matrix stride is always 16.
       */
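      /* For example, gathering one column of a row-major mat4 emits
       * four float loads spaced 16 bytes apart, one from each stored
       * row, masked into successive channels of the destination.
       */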
      unsigned matrix_stride = 16;

      for (unsigned i = 0; i < deref->type->vector_elements; i++) {
         ir_rvalue *chan_offset =
            add(base_offset,
                new(mem_ctx) ir_constant(deref_offset + i * matrix_stride));

         base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                       ubo_load(glsl_type::float_type,
                                                chan_offset),
                                       (1U << i)));
      }
   }
}

} /* unnamed namespace */

void
lower_ubo_reference(struct gl_shader *shader, exec_list *instructions)
{
   lower_ubo_reference_visitor v(shader);

   /* Loop over the instructions lowering references, because taking a
    * deref of a UBO array using a UBO dereference as the index will
    * produce a collection of instructions all of which have cloned
    * UBO dereferences for that array index.
    */
   do {
      v.progress = false;
      visit_list_elements(&v, instructions);
   } while (v.progress);
}