/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_ubo_reference.cpp
 *
 * IR lower pass to replace dereferences of variables in a uniform
 * buffer object with usage of ir_binop_ubo_load expressions, each of
 * which can read data up to the size of a vec4.
 *
 * This relieves drivers of the responsibility to deal with tricky UBO
 * layout issues like std140 structures and row_major matrices on
 * their own.
 */
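
/* Roughly (illustrative, not verbatim IR): a read of "b.v" from
 * "uniform Block { vec4 v; } b;" becomes a "ubo_load_temp" temporary that
 * is assigned from an ir_binop_ubo_load of the block's index and the
 * member's std140 byte offset, both computed by this pass, and the original
 * dereference is replaced by a read of that temporary.
 */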

#include "ir.h"
#include "ir_builder.h"
#include "ir_rvalue_visitor.h"
#include "main/macros.h"

using namespace ir_builder;

/**
 * Determine if a thing being dereferenced is row-major
 *
 * There is some trickery here.
 *
 * If the thing being dereferenced is a member of uniform block \b without an
 * instance name, then the name of the \c ir_variable is the field name of an
 * interface type. If this field is row-major, then the thing referenced is
 * row-major.
 *
 * If the thing being dereferenced is a member of uniform block \b with an
 * instance name, then the last dereference in the tree will be an
 * \c ir_dereference_record. If that record field is row-major, then the
 * thing referenced is row-major.
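 *
 * For example (illustrative): with "uniform U { layout(row_major) mat4 m; };"
 * a dereference of \c m reports row-major through the variable's matrix
 * layout, while with an instance name, "uniform U { layout(row_major) mat4 m; } u;",
 * the \c ir_dereference_record for \c u.m carries the row-major layout.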
 */
static bool
is_dereferenced_thing_row_major(const ir_dereference *deref)
{
   bool matrix = false;
   const ir_rvalue *ir = deref;

   while (true) {
      matrix = matrix || ir->type->without_array()->is_matrix();

      switch (ir->ir_type) {
      case ir_type_dereference_array: {
         const ir_dereference_array *const array_deref =
            (const ir_dereference_array *) ir;

         ir = array_deref->array;
         break;
      }

      case ir_type_dereference_record: {
         const ir_dereference_record *const record_deref =
            (const ir_dereference_record *) ir;

         ir = record_deref->record;

         const int idx = ir->type->field_index(record_deref->field);
         assert(idx >= 0);

         const enum glsl_matrix_layout matrix_layout =
            glsl_matrix_layout(ir->type->fields.structure[idx].matrix_layout);

         switch (matrix_layout) {
         case GLSL_MATRIX_LAYOUT_INHERITED:
            break;
         case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR:
            return false;
         case GLSL_MATRIX_LAYOUT_ROW_MAJOR:
            return matrix || deref->type->without_array()->is_record();
         }

         break;
      }

      case ir_type_dereference_variable: {
         const ir_dereference_variable *const var_deref =
            (const ir_dereference_variable *) ir;

         const enum glsl_matrix_layout matrix_layout =
            glsl_matrix_layout(var_deref->var->data.matrix_layout);

         switch (matrix_layout) {
         case GLSL_MATRIX_LAYOUT_INHERITED:
            assert(!matrix);
            return false;
         case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR:
            return false;
         case GLSL_MATRIX_LAYOUT_ROW_MAJOR:
            return matrix || deref->type->without_array()->is_record();
         }

         unreachable("invalid matrix layout");
         break;
      }

      default:
         return false;
      }
   }

   /* The tree must have ended with a dereference that wasn't an
    * ir_dereference_variable. That is invalid, and it should be impossible.
    */
   unreachable("invalid dereference tree");
   return false;
}

namespace {
class lower_ubo_reference_visitor : public ir_rvalue_enter_visitor {
public:
   lower_ubo_reference_visitor(struct gl_shader *shader)
   : shader(shader)
   {
   }

   void handle_rvalue(ir_rvalue **rvalue);
   void emit_ubo_loads(ir_dereference *deref, ir_variable *base_offset,
                       unsigned int deref_offset, bool row_major,
                       int matrix_columns);
   ir_expression *ubo_load(const struct glsl_type *type,
                           ir_rvalue *offset);

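   /* State used while lowering a single dereference: mem_ctx is the ralloc
    * context for newly built IR, shader is the shader whose UniformBlocks
    * are searched, ubo_var and uniform_block identify the variable and the
    * (possibly computed) block index being lowered, and progress records
    * whether any rewrite happened during this pass over the instructions.
    */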
   void *mem_ctx;
   struct gl_shader *shader;
   struct gl_uniform_buffer_variable *ubo_var;
   ir_rvalue *uniform_block;
   bool progress;
};

/**
 * Determine the name of the interface block field
 *
 * This is the name of the specific member as it would appear in the
 * \c gl_uniform_buffer_variable::Name field in the shader's
 * \c UniformBlocks array.
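 *
 * For example (illustrative): for "uniform Block { vec4 v; } b[4];", a
 * dereference of b[2].v yields "Block[2]".  A non-constant index yields
 * "Block[0]", and the index expression itself is returned through
 * \c nonconst_block_index.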
 */
static const char *
interface_field_name(void *mem_ctx, char *base_name, ir_dereference *d,
                     ir_rvalue **nonconst_block_index)
{
   ir_rvalue *previous_index = NULL;
   *nonconst_block_index = NULL;

   while (d != NULL) {
      switch (d->ir_type) {
      case ir_type_dereference_variable: {
         ir_dereference_variable *v = (ir_dereference_variable *) d;
         if (previous_index
             && v->var->is_interface_instance()
             && v->var->type->is_array()) {

            ir_constant *const_index = previous_index->as_constant();
            if (!const_index) {
               *nonconst_block_index = previous_index;
               return ralloc_asprintf(mem_ctx, "%s[0]", base_name);
            } else {
               return ralloc_asprintf(mem_ctx,
                                      "%s[%d]",
                                      base_name,
                                      const_index->get_uint_component(0));
            }
         } else {
            return base_name;
         }

         break;
      }

      case ir_type_dereference_record: {
         ir_dereference_record *r = (ir_dereference_record *) d;

         d = r->record->as_dereference();
         break;
      }

      case ir_type_dereference_array: {
         ir_dereference_array *a = (ir_dereference_array *) d;

         d = a->array->as_dereference();
         previous_index = a->array_index;

         break;
      }

      default:
         assert(!"Should not get here.");
         break;
      }
   }

   assert(!"Should not get here.");
   return NULL;
}

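/**
 * Replace a dereference of a UBO-backed variable with loads from the UBO.
 *
 * Computes the block index and the (constant plus variable) byte offset of
 * the dereferenced region, emits ir_binop_ubo_load assignments into a
 * temporary via emit_ubo_loads(), and rewrites *rvalue to read that
 * temporary instead.
 */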
void
lower_ubo_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
{
   if (!*rvalue)
      return;

   ir_dereference *deref = (*rvalue)->as_dereference();
   if (!deref)
      return;

   ir_variable *var = deref->variable_referenced();
   if (!var || !var->is_in_uniform_block())
      return;

   mem_ctx = ralloc_parent(*rvalue);

   ir_rvalue *nonconst_block_index;
   const char *const field_name =
      interface_field_name(mem_ctx, (char *) var->get_interface_type()->name,
                           deref, &nonconst_block_index);

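   /* Locate the gl_uniform_block that backs this variable by name and build
    * the block index operand for ir_binop_ubo_load, adding in the
    * non-constant instance-array index when one was found above.
    */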
   this->uniform_block = NULL;
   for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
      if (strcmp(field_name, shader->UniformBlocks[i].Name) == 0) {

         ir_constant *index = new(mem_ctx) ir_constant(i);

         if (nonconst_block_index) {
            if (nonconst_block_index->type != glsl_type::uint_type)
               nonconst_block_index = i2u(nonconst_block_index);
            this->uniform_block = add(nonconst_block_index, index);
         } else {
            this->uniform_block = index;
         }

         struct gl_uniform_block *block = &shader->UniformBlocks[i];

         this->ubo_var = var->is_interface_instance()
            ? &block->Uniforms[0] : &block->Uniforms[var->data.location];

         break;
      }
   }

   assert(this->uniform_block);

   ir_rvalue *offset = new(mem_ctx) ir_constant(0u);
   unsigned const_offset = 0;
   bool row_major = is_dereferenced_thing_row_major(deref);
   int matrix_columns = 1;

   /* Calculate the offset to the start of the region of the UBO
    * dereferenced by *rvalue. This may be a variable offset if an
    * array dereference has a variable index.
    */
   while (deref) {
      switch (deref->ir_type) {
      case ir_type_dereference_variable: {
         const_offset += ubo_var->Offset;
         deref = NULL;
         break;
      }

      case ir_type_dereference_array: {
         ir_dereference_array *deref_array = (ir_dereference_array *)deref;
         unsigned array_stride;
         if (deref_array->array->type->is_matrix() && row_major) {
            /* When loading a vector out of a row major matrix, the
             * step between the columns (vectors) is the size of a
             * float, while the step between the rows (elements of a
             * vector) is handled below in emit_ubo_loads.
             */
            array_stride = 4;
            if (deref_array->array->type->is_double())
               array_stride *= 2;
            matrix_columns = deref_array->array->type->matrix_columns;
         } else if (deref_array->type->is_interface()) {
            /* We're processing an array dereference of an interface instance
             * array. The thing being dereferenced *must* be a variable
             * dereference because interfaces cannot be embedded in other
             * types. In terms of calculating the offsets for the lowering
             * pass, we don't care about the array index. All elements of an
             * interface instance array will have the same offsets relative to
             * the base of the block that backs them.
             */
            assert(deref_array->array->as_dereference_variable());
            deref = deref_array->array->as_dereference();
            break;
         } else {
            /* Whether or not the field is row-major (because it might be a
             * bvec2 or something) does not affect the array itself. We need
             * to know whether an array element in its entirety is row-major.
             */
            const bool array_row_major =
               is_dereferenced_thing_row_major(deref_array);

            array_stride = deref_array->type->std140_size(array_row_major);
            array_stride = glsl_align(array_stride, 16);
         }

         ir_rvalue *array_index = deref_array->array_index;
         if (array_index->type->base_type == GLSL_TYPE_INT)
            array_index = i2u(array_index);

         ir_constant *const_index =
            array_index->constant_expression_value(NULL);
         if (const_index) {
            const_offset += array_stride * const_index->value.u[0];
         } else {
            offset = add(offset,
                         mul(array_index,
                             new(mem_ctx) ir_constant(array_stride)));
         }
         deref = deref_array->array->as_dereference();
         break;
      }

      case ir_type_dereference_record: {
         ir_dereference_record *deref_record = (ir_dereference_record *)deref;
         const glsl_type *struct_type = deref_record->record->type;
         unsigned intra_struct_offset = 0;

         for (unsigned int i = 0; i < struct_type->length; i++) {
            const glsl_type *type = struct_type->fields.structure[i].type;

            ir_dereference_record *field_deref =
               new(mem_ctx) ir_dereference_record(deref_record->record,
                                                  struct_type->fields.structure[i].name);
            const bool field_row_major =
               is_dereferenced_thing_row_major(field_deref);

            ralloc_free(field_deref);

            unsigned field_align = type->std140_base_alignment(field_row_major);

            intra_struct_offset = glsl_align(intra_struct_offset, field_align);

            if (strcmp(struct_type->fields.structure[i].name,
                       deref_record->field) == 0)
               break;
            intra_struct_offset += type->std140_size(field_row_major);

            /* If the field just examined was itself a structure, apply rule
             * #9:
             *
             *     "The structure may have padding at the end; the base offset
             *     of the member following the sub-structure is rounded up to
             *     the next multiple of the base alignment of the structure."
             */
            if (type->without_array()->is_record()) {
               intra_struct_offset = glsl_align(intra_struct_offset,
                                                field_align);
            }
         }

         const_offset += intra_struct_offset;

         deref = deref_record->record->as_dereference();
         break;
      }
      default:
         assert(!"not reached");
         deref = NULL;
         break;
      }
   }

   /* Now that we've calculated the offset to the start of the
    * dereference, walk over the type and emit loads into a temporary.
    */
   const glsl_type *type = (*rvalue)->type;
   ir_variable *load_var = new(mem_ctx) ir_variable(type,
                                                    "ubo_load_temp",
                                                    ir_var_temporary);
   base_ir->insert_before(load_var);

   ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
                                                       "ubo_load_temp_offset",
                                                       ir_var_temporary);
   base_ir->insert_before(load_offset);
   base_ir->insert_before(assign(load_offset, offset));

   deref = new(mem_ctx) ir_dereference_variable(load_var);
   emit_ubo_loads(deref, load_offset, const_offset, row_major, matrix_columns);
   *rvalue = deref;

   progress = true;
}

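/**
 * Build a single ir_binop_ubo_load expression of the given type that reads
 * from the current uniform_block at the given byte offset.
 */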
ir_expression *
lower_ubo_reference_visitor::ubo_load(const glsl_type *type,
                                      ir_rvalue *offset)
{
   ir_rvalue *block_ref = this->uniform_block->clone(mem_ctx, NULL);
   return new(mem_ctx)
      ir_expression(ir_binop_ubo_load,
                    type,
                    block_ref,
                    offset);
}

/**
 * Takes LHS and emits a series of assignments into its components
 * from the UBO variable at variable_offset + deref_offset.
 *
 * Recursively calls itself to break the deref down to the point that
 * the ir_binop_ubo_load expressions generated are contiguous scalars
 * or vectors.
 */
void
lower_ubo_reference_visitor::emit_ubo_loads(ir_dereference *deref,
                                            ir_variable *base_offset,
                                            unsigned int deref_offset,
                                            bool row_major,
                                            int matrix_columns)
{
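   /* Structures: recurse into each field, aligning the running offset to the
    * field's std140 base alignment and advancing by its std140 size.
    */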
   if (deref->type->is_record()) {
      unsigned int field_offset = 0;

      for (unsigned i = 0; i < deref->type->length; i++) {
         const struct glsl_struct_field *field =
            &deref->type->fields.structure[i];
         ir_dereference *field_deref =
            new(mem_ctx) ir_dereference_record(deref->clone(mem_ctx, NULL),
                                               field->name);

         field_offset =
            glsl_align(field_offset,
                       field->type->std140_base_alignment(row_major));

         emit_ubo_loads(field_deref, base_offset, deref_offset + field_offset,
                        row_major, 1);

         field_offset += field->type->std140_size(row_major);
      }
      return;
   }

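   /* Arrays: std140 rounds each element's stride up to a multiple of 16
    * bytes (the size of a vec4), so element i lives at i * array_stride.
    */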
   if (deref->type->is_array()) {
      unsigned array_stride =
         glsl_align(deref->type->fields.array->std140_size(row_major),
                    16);

      for (unsigned i = 0; i < deref->type->length; i++) {
         ir_constant *element = new(mem_ctx) ir_constant(i);
         ir_dereference *element_deref =
            new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL),
                                              element);
         emit_ubo_loads(element_deref, base_offset,
                        deref_offset + i * array_stride,
                        row_major, 1);
      }
      return;
   }

   if (deref->type->is_matrix()) {
      for (unsigned i = 0; i < deref->type->matrix_columns; i++) {
         ir_constant *col = new(mem_ctx) ir_constant(i);
         ir_dereference *col_deref =
            new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL),
                                              col);

         if (row_major) {
            /* For a row-major matrix, the next column starts at the next
             * element.
             */
            int size_mul = deref->type->is_double() ? 8 : 4;
            emit_ubo_loads(col_deref, base_offset, deref_offset + i * size_mul,
                           row_major, deref->type->matrix_columns);
         } else {
            /* std140 always rounds the stride of arrays (and matrices) to a
             * vec4, so matrices are always 16 between columns/rows. With
             * doubles, they will be 32 apart when there are more than 2 rows.
             */
            int size_mul = (deref->type->is_double() &&
                            deref->type->vector_elements > 2) ? 32 : 16;
            emit_ubo_loads(col_deref, base_offset, deref_offset + i * size_mul,
                           row_major, deref->type->matrix_columns);
         }
      }
      return;
   }

   assert(deref->type->is_scalar() ||
          deref->type->is_vector());

   if (!row_major) {
      ir_rvalue *offset = add(base_offset,
                              new(mem_ctx) ir_constant(deref_offset));
      base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                    ubo_load(deref->type, offset)));
   } else {
      unsigned N = deref->type->is_double() ? 8 : 4;

      /* We're dereffing a column out of a row-major matrix, so we
       * gather the vector from each stored row.
       */
      assert(deref->type->base_type == GLSL_TYPE_FLOAT ||
             deref->type->base_type == GLSL_TYPE_DOUBLE);
      /* Matrices, row_major or not, are stored as if they were
       * arrays of vectors of the appropriate size in std140.
       * Arrays have their strides rounded up to a vec4, so the
       * matrix stride is always 16. However a double matrix may either be 16
       * or 32 depending on the number of columns.
       */
      assert(matrix_columns <= 4);
      unsigned matrix_stride = glsl_align(matrix_columns * N, 16);

      const glsl_type *ubo_type = deref->type->base_type == GLSL_TYPE_FLOAT ?
         glsl_type::float_type : glsl_type::double_type;

      for (unsigned i = 0; i < deref->type->vector_elements; i++) {
         ir_rvalue *chan_offset =
            add(base_offset,
                new(mem_ctx) ir_constant(deref_offset + i * matrix_stride));

         base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
                                       ubo_load(ubo_type,
                                                chan_offset),
                                       (1U << i)));
      }
   }
}

} /* unnamed namespace */

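/**
 * Entry point: lower every UBO dereference in \p instructions for \p shader,
 * repeating until a pass makes no further progress.
 */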
void
lower_ubo_reference(struct gl_shader *shader, exec_list *instructions)
{
   lower_ubo_reference_visitor v(shader);

   /* Loop over the instructions lowering references, because taking a deref
    * of a UBO array using a UBO dereference as the index will produce a
    * collection of instructions, all of which have cloned UBO dereferences
    * for that array index.
    */
   do {
      v.progress = false;
      visit_list_elements(&v, instructions);
   } while (v.progress);
}