2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file lower_ubo_reference.cpp
27 * IR lower pass to replace dereferences of variables in a uniform
28 * buffer object with usage of ir_binop_ubo_load expressions, each of
29 * which can read data up to the size of a vec4.
31 * This relieves drivers of the responsibility to deal with tricky UBO
32 * layout issues like std140 structures and row_major matrices on
37 #include "ir_builder.h"
38 #include "ir_rvalue_visitor.h"
39 #include "main/macros.h"
40 #include "glsl_parser_extras.h"
42 using namespace ir_builder
;
45 * Determine if a thing being dereferenced is row-major
47 * There is some trickery here.
49 * If the thing being dereferenced is a member of uniform block \b without an
50 * instance name, then the name of the \c ir_variable is the field name of an
51 * interface type. If this field is row-major, then the thing referenced is
54 * If the thing being dereferenced is a member of uniform block \b with an
55 * instance name, then the last dereference in the tree will be an
56 * \c ir_dereference_record. If that record field is row-major, then the
57 * thing referenced is row-major.
60 is_dereferenced_thing_row_major(const ir_rvalue
*deref
)
63 const ir_rvalue
*ir
= deref
;
66 matrix
= matrix
|| ir
->type
->without_array()->is_matrix();
68 switch (ir
->ir_type
) {
69 case ir_type_dereference_array
: {
70 const ir_dereference_array
*const array_deref
=
71 (const ir_dereference_array
*) ir
;
73 ir
= array_deref
->array
;
77 case ir_type_dereference_record
: {
78 const ir_dereference_record
*const record_deref
=
79 (const ir_dereference_record
*) ir
;
81 ir
= record_deref
->record
;
83 const int idx
= ir
->type
->field_index(record_deref
->field
);
86 const enum glsl_matrix_layout matrix_layout
=
87 glsl_matrix_layout(ir
->type
->fields
.structure
[idx
].matrix_layout
);
89 switch (matrix_layout
) {
90 case GLSL_MATRIX_LAYOUT_INHERITED
:
92 case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR
:
94 case GLSL_MATRIX_LAYOUT_ROW_MAJOR
:
95 return matrix
|| deref
->type
->without_array()->is_record();
101 case ir_type_dereference_variable
: {
102 const ir_dereference_variable
*const var_deref
=
103 (const ir_dereference_variable
*) ir
;
105 const enum glsl_matrix_layout matrix_layout
=
106 glsl_matrix_layout(var_deref
->var
->data
.matrix_layout
);
108 switch (matrix_layout
) {
109 case GLSL_MATRIX_LAYOUT_INHERITED
:
112 case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR
:
114 case GLSL_MATRIX_LAYOUT_ROW_MAJOR
:
115 return matrix
|| deref
->type
->without_array()->is_record();
118 unreachable("invalid matrix layout");
127 /* The tree must have ended with a dereference that wasn't an
128 * ir_dereference_variable. That is invalid, and it should be impossible.
130 unreachable("invalid dereference tree");
135 class lower_ubo_reference_visitor
: public ir_rvalue_enter_visitor
{
137 lower_ubo_reference_visitor(struct gl_shader
*shader
)
142 void handle_rvalue(ir_rvalue
**rvalue
);
143 ir_visitor_status
visit_enter(ir_assignment
*ir
);
145 void setup_for_load_or_store(ir_variable
*var
,
148 unsigned *const_offset
,
152 ir_expression
*ubo_load(const struct glsl_type
*type
,
154 ir_call
*ssbo_load(const struct glsl_type
*type
,
157 void check_for_ssbo_store(ir_assignment
*ir
);
158 void write_to_memory(ir_dereference
*deref
,
160 ir_variable
*write_var
,
161 unsigned write_mask
);
162 ir_call
*ssbo_store(ir_rvalue
*deref
, ir_rvalue
*offset
,
163 unsigned write_mask
);
165 void emit_access(bool is_write
, ir_dereference
*deref
,
166 ir_variable
*base_offset
, unsigned int deref_offset
,
167 bool row_major
, int matrix_columns
,
168 unsigned packing
, unsigned write_mask
);
170 ir_visitor_status
visit_enter(class ir_expression
*);
171 ir_expression
*calculate_ssbo_unsized_array_length(ir_expression
*expr
);
172 void check_ssbo_unsized_array_length_expression(class ir_expression
*);
173 void check_ssbo_unsized_array_length_assignment(ir_assignment
*ir
);
175 ir_expression
*process_ssbo_unsized_array_length(ir_rvalue
**,
178 ir_expression
*emit_ssbo_get_buffer_size();
180 unsigned calculate_unsized_array_stride(ir_dereference
*deref
,
183 ir_call
*lower_ssbo_atomic_intrinsic(ir_call
*ir
);
184 ir_call
*check_for_ssbo_atomic_intrinsic(ir_call
*ir
);
185 ir_visitor_status
visit_enter(ir_call
*ir
);
188 struct gl_shader
*shader
;
189 struct gl_uniform_buffer_variable
*ubo_var
;
190 ir_rvalue
*uniform_block
;
192 bool is_shader_storage
;
196 * Determine the name of the interface block field
198 * This is the name of the specific member as it would appear in the
199 * \c gl_uniform_buffer_variable::Name field in the shader's
200 * \c UniformBlocks array.
203 interface_field_name(void *mem_ctx
, char *base_name
, ir_rvalue
*d
,
204 ir_rvalue
**nonconst_block_index
)
206 *nonconst_block_index
= NULL
;
207 char *name_copy
= NULL
;
208 size_t base_length
= 0;
210 /* Loop back through the IR until we find the uniform block */
213 switch (ir
->ir_type
) {
214 case ir_type_dereference_variable
: {
220 case ir_type_dereference_record
: {
221 ir_dereference_record
*r
= (ir_dereference_record
*) ir
;
222 ir
= r
->record
->as_dereference();
224 /* If we got here it means any previous array subscripts belong to
225 * block members and not the block itself so skip over them in the
232 case ir_type_dereference_array
: {
233 ir_dereference_array
*a
= (ir_dereference_array
*) ir
;
234 ir
= a
->array
->as_dereference();
238 case ir_type_swizzle
: {
239 ir_swizzle
*s
= (ir_swizzle
*) ir
;
240 ir
= s
->val
->as_dereference();
241 /* Skip swizzle in the next pass */
247 assert(!"Should not get here.");
253 switch (d
->ir_type
) {
254 case ir_type_dereference_variable
: {
255 ir_dereference_variable
*v
= (ir_dereference_variable
*) d
;
256 if (name_copy
!= NULL
&&
257 v
->var
->is_interface_instance() &&
258 v
->var
->type
->is_array()) {
261 *nonconst_block_index
= NULL
;
268 case ir_type_dereference_array
: {
269 ir_dereference_array
*a
= (ir_dereference_array
*) d
;
272 if (name_copy
== NULL
) {
273 name_copy
= ralloc_strdup(mem_ctx
, base_name
);
274 base_length
= strlen(name_copy
);
277 /* For arrays of arrays we start at the innermost array and work our
278 * way out so we need to insert the subscript at the base of the
279 * name string rather than just attaching it to the end.
281 new_length
= base_length
;
282 ir_constant
*const_index
= a
->array_index
->as_constant();
283 char *end
= ralloc_strdup(NULL
, &name_copy
[new_length
]);
285 ir_rvalue
*array_index
= a
->array_index
;
286 if (array_index
->type
!= glsl_type::uint_type
)
287 array_index
= i2u(array_index
);
289 if (a
->array
->type
->is_array() &&
290 a
->array
->type
->fields
.array
->is_array()) {
291 ir_constant
*base_size
= new(mem_ctx
)
292 ir_constant(a
->array
->type
->fields
.array
->arrays_of_arrays_size());
293 array_index
= mul(array_index
, base_size
);
296 if (*nonconst_block_index
) {
297 *nonconst_block_index
= add(*nonconst_block_index
, array_index
);
299 *nonconst_block_index
= array_index
;
302 ralloc_asprintf_rewrite_tail(&name_copy
, &new_length
, "[0]%s",
305 ralloc_asprintf_rewrite_tail(&name_copy
, &new_length
, "[%d]%s",
306 const_index
->get_uint_component(0),
311 d
= a
->array
->as_dereference();
317 assert(!"Should not get here.");
322 assert(!"Should not get here.");
327 lower_ubo_reference_visitor::setup_for_load_or_store(ir_variable
*var
,
330 unsigned *const_offset
,
335 /* Determine the name of the interface block */
336 ir_rvalue
*nonconst_block_index
;
337 const char *const field_name
=
338 interface_field_name(mem_ctx
, (char *) var
->get_interface_type()->name
,
339 deref
, &nonconst_block_index
);
341 /* Locate the block by interface name */
342 this->is_shader_storage
= var
->is_in_shader_storage_block();
344 struct gl_uniform_block
**blocks
;
345 if (this->is_shader_storage
) {
346 num_blocks
= shader
->NumShaderStorageBlocks
;
347 blocks
= shader
->ShaderStorageBlocks
;
349 num_blocks
= shader
->NumUniformBlocks
;
350 blocks
= shader
->UniformBlocks
;
352 this->uniform_block
= NULL
;
353 for (unsigned i
= 0; i
< num_blocks
; i
++) {
354 if (strcmp(field_name
, blocks
[i
]->Name
) == 0) {
356 ir_constant
*index
= new(mem_ctx
) ir_constant(i
);
358 if (nonconst_block_index
) {
359 this->uniform_block
= add(nonconst_block_index
, index
);
361 this->uniform_block
= index
;
364 this->ubo_var
= var
->is_interface_instance()
365 ? &blocks
[i
]->Uniforms
[0] : &blocks
[i
]->Uniforms
[var
->data
.location
];
371 assert(this->uniform_block
);
373 *offset
= new(mem_ctx
) ir_constant(0u);
375 *row_major
= is_dereferenced_thing_row_major(deref
);
378 /* Calculate the offset to the start of the region of the UBO
379 * dereferenced by *rvalue. This may be a variable offset if an
380 * array dereference has a variable index.
383 switch (deref
->ir_type
) {
384 case ir_type_dereference_variable
: {
385 *const_offset
+= ubo_var
->Offset
;
390 case ir_type_dereference_array
: {
391 ir_dereference_array
*deref_array
= (ir_dereference_array
*) deref
;
392 unsigned array_stride
;
393 if (deref_array
->array
->type
->is_vector()) {
394 /* We get this when storing or loading a component out of a vector
395 * with a non-constant index. This happens for v[i] = f where v is
396 * a vector (or m[i][j] = f where m is a matrix). If we don't
397 * lower that here, it gets turned into v = vector_insert(v, i,
398 * f), which loads the entire vector, modifies one component and
399 * then write the entire thing back. That breaks if another
400 * thread or SIMD channel is modifying the same vector.
403 if (deref_array
->array
->type
->is_double())
405 } else if (deref_array
->array
->type
->is_matrix() && *row_major
) {
406 /* When loading a vector out of a row major matrix, the
407 * step between the columns (vectors) is the size of a
408 * float, while the step between the rows (elements of a
409 * vector) is handled below in emit_ubo_loads.
412 if (deref_array
->array
->type
->is_double())
414 *matrix_columns
= deref_array
->array
->type
->matrix_columns
;
415 } else if (deref_array
->type
->without_array()->is_interface()) {
416 /* We're processing an array dereference of an interface instance
417 * array. The thing being dereferenced *must* be a variable
418 * dereference because interfaces cannot be embedded in other
419 * types. In terms of calculating the offsets for the lowering
420 * pass, we don't care about the array index. All elements of an
421 * interface instance array will have the same offsets relative to
422 * the base of the block that backs them.
424 deref
= deref_array
->array
->as_dereference();
427 /* Whether or not the field is row-major (because it might be a
428 * bvec2 or something) does not affect the array itself. We need
429 * to know whether an array element in its entirety is row-major.
431 const bool array_row_major
=
432 is_dereferenced_thing_row_major(deref_array
);
434 /* The array type will give the correct interface packing
437 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
438 array_stride
= deref_array
->type
->std430_array_stride(array_row_major
);
440 array_stride
= deref_array
->type
->std140_size(array_row_major
);
441 array_stride
= glsl_align(array_stride
, 16);
445 ir_rvalue
*array_index
= deref_array
->array_index
;
446 if (array_index
->type
->base_type
== GLSL_TYPE_INT
)
447 array_index
= i2u(array_index
);
449 ir_constant
*const_index
=
450 array_index
->constant_expression_value(NULL
);
452 *const_offset
+= array_stride
* const_index
->value
.u
[0];
454 *offset
= add(*offset
,
456 new(mem_ctx
) ir_constant(array_stride
)));
458 deref
= deref_array
->array
->as_dereference();
462 case ir_type_dereference_record
: {
463 ir_dereference_record
*deref_record
= (ir_dereference_record
*) deref
;
464 const glsl_type
*struct_type
= deref_record
->record
->type
;
465 unsigned intra_struct_offset
= 0;
467 for (unsigned int i
= 0; i
< struct_type
->length
; i
++) {
468 const glsl_type
*type
= struct_type
->fields
.structure
[i
].type
;
470 ir_dereference_record
*field_deref
= new(mem_ctx
)
471 ir_dereference_record(deref_record
->record
,
472 struct_type
->fields
.structure
[i
].name
);
473 const bool field_row_major
=
474 is_dereferenced_thing_row_major(field_deref
);
476 ralloc_free(field_deref
);
478 unsigned field_align
= 0;
480 if (packing
== GLSL_INTERFACE_PACKING_STD430
)
481 field_align
= type
->std430_base_alignment(field_row_major
);
483 field_align
= type
->std140_base_alignment(field_row_major
);
485 intra_struct_offset
= glsl_align(intra_struct_offset
, field_align
);
487 if (strcmp(struct_type
->fields
.structure
[i
].name
,
488 deref_record
->field
) == 0)
491 if (packing
== GLSL_INTERFACE_PACKING_STD430
)
492 intra_struct_offset
+= type
->std430_size(field_row_major
);
494 intra_struct_offset
+= type
->std140_size(field_row_major
);
496 /* If the field just examined was itself a structure, apply rule
499 * "The structure may have padding at the end; the base offset
500 * of the member following the sub-structure is rounded up to
501 * the next multiple of the base alignment of the structure."
503 if (type
->without_array()->is_record()) {
504 intra_struct_offset
= glsl_align(intra_struct_offset
,
510 *const_offset
+= intra_struct_offset
;
511 deref
= deref_record
->record
->as_dereference();
515 case ir_type_swizzle
: {
516 ir_swizzle
*deref_swizzle
= (ir_swizzle
*) deref
;
518 assert(deref_swizzle
->mask
.num_components
== 1);
520 *const_offset
+= deref_swizzle
->mask
.x
* sizeof(int);
521 deref
= deref_swizzle
->val
->as_dereference();
526 assert(!"not reached");
534 lower_ubo_reference_visitor::handle_rvalue(ir_rvalue
**rvalue
)
539 ir_dereference
*deref
= (*rvalue
)->as_dereference();
543 ir_variable
*var
= deref
->variable_referenced();
544 if (!var
|| !var
->is_in_buffer_block())
547 mem_ctx
= ralloc_parent(shader
->ir
);
549 ir_rvalue
*offset
= NULL
;
550 unsigned const_offset
;
553 unsigned packing
= var
->get_interface_type()->interface_packing
;
555 /* Compute the offset to the start if the dereference as well as other
556 * information we need to configure the write
558 setup_for_load_or_store(var
, deref
,
559 &offset
, &const_offset
,
560 &row_major
, &matrix_columns
,
564 /* Now that we've calculated the offset to the start of the
565 * dereference, walk over the type and emit loads into a temporary.
567 const glsl_type
*type
= (*rvalue
)->type
;
568 ir_variable
*load_var
= new(mem_ctx
) ir_variable(type
,
571 base_ir
->insert_before(load_var
);
573 ir_variable
*load_offset
= new(mem_ctx
) ir_variable(glsl_type::uint_type
,
574 "ubo_load_temp_offset",
576 base_ir
->insert_before(load_offset
);
577 base_ir
->insert_before(assign(load_offset
, offset
));
579 deref
= new(mem_ctx
) ir_dereference_variable(load_var
);
580 emit_access(false, deref
, load_offset
, const_offset
,
581 row_major
, matrix_columns
, packing
, 0);
588 lower_ubo_reference_visitor::ubo_load(const glsl_type
*type
,
591 ir_rvalue
*block_ref
= this->uniform_block
->clone(mem_ctx
, NULL
);
593 ir_expression(ir_binop_ubo_load
,
601 shader_storage_buffer_object(const _mesa_glsl_parse_state
*state
)
603 return state
->ARB_shader_storage_buffer_object_enable
;
607 lower_ubo_reference_visitor::ssbo_store(ir_rvalue
*deref
,
611 exec_list sig_params
;
613 ir_variable
*block_ref
= new(mem_ctx
)
614 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
615 sig_params
.push_tail(block_ref
);
617 ir_variable
*offset_ref
= new(mem_ctx
)
618 ir_variable(glsl_type::uint_type
, "offset" , ir_var_function_in
);
619 sig_params
.push_tail(offset_ref
);
621 ir_variable
*val_ref
= new(mem_ctx
)
622 ir_variable(deref
->type
, "value" , ir_var_function_in
);
623 sig_params
.push_tail(val_ref
);
625 ir_variable
*writemask_ref
= new(mem_ctx
)
626 ir_variable(glsl_type::uint_type
, "write_mask" , ir_var_function_in
);
627 sig_params
.push_tail(writemask_ref
);
629 ir_function_signature
*sig
= new(mem_ctx
)
630 ir_function_signature(glsl_type::void_type
, shader_storage_buffer_object
);
632 sig
->replace_parameters(&sig_params
);
633 sig
->is_intrinsic
= true;
635 ir_function
*f
= new(mem_ctx
) ir_function("__intrinsic_store_ssbo");
636 f
->add_signature(sig
);
638 exec_list call_params
;
639 call_params
.push_tail(this->uniform_block
->clone(mem_ctx
, NULL
));
640 call_params
.push_tail(offset
->clone(mem_ctx
, NULL
));
641 call_params
.push_tail(deref
->clone(mem_ctx
, NULL
));
642 call_params
.push_tail(new(mem_ctx
) ir_constant(write_mask
));
643 return new(mem_ctx
) ir_call(sig
, NULL
, &call_params
);
647 lower_ubo_reference_visitor::ssbo_load(const struct glsl_type
*type
,
650 exec_list sig_params
;
652 ir_variable
*block_ref
= new(mem_ctx
)
653 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
654 sig_params
.push_tail(block_ref
);
656 ir_variable
*offset_ref
= new(mem_ctx
)
657 ir_variable(glsl_type::uint_type
, "offset_ref" , ir_var_function_in
);
658 sig_params
.push_tail(offset_ref
);
660 ir_function_signature
*sig
=
661 new(mem_ctx
) ir_function_signature(type
, shader_storage_buffer_object
);
663 sig
->replace_parameters(&sig_params
);
664 sig
->is_intrinsic
= true;
666 ir_function
*f
= new(mem_ctx
) ir_function("__intrinsic_load_ssbo");
667 f
->add_signature(sig
);
669 ir_variable
*result
= new(mem_ctx
)
670 ir_variable(type
, "ssbo_load_result", ir_var_temporary
);
671 base_ir
->insert_before(result
);
672 ir_dereference_variable
*deref_result
= new(mem_ctx
)
673 ir_dereference_variable(result
);
675 exec_list call_params
;
676 call_params
.push_tail(this->uniform_block
->clone(mem_ctx
, NULL
));
677 call_params
.push_tail(offset
->clone(mem_ctx
, NULL
));
679 return new(mem_ctx
) ir_call(sig
, deref_result
, &call_params
);
/* Return a write mask with the low \p n bits set, i.e. a mask covering the
 * first n vector components (n must be <= the bit width of unsigned).
 */
static inline unsigned
writemask_for_size(unsigned n)
{
   return ((1 << n) - 1);
}
689 * Takes a deref and recursively calls itself to break the deref down to the
690 * point that the reads or writes generated are contiguous scalars or vectors.
693 lower_ubo_reference_visitor::emit_access(bool is_write
,
694 ir_dereference
*deref
,
695 ir_variable
*base_offset
,
696 unsigned int deref_offset
,
702 if (deref
->type
->is_record()) {
703 unsigned int field_offset
= 0;
705 for (unsigned i
= 0; i
< deref
->type
->length
; i
++) {
706 const struct glsl_struct_field
*field
=
707 &deref
->type
->fields
.structure
[i
];
708 ir_dereference
*field_deref
=
709 new(mem_ctx
) ir_dereference_record(deref
->clone(mem_ctx
, NULL
),
713 glsl_align(field_offset
,
714 field
->type
->std140_base_alignment(row_major
));
716 emit_access(is_write
, field_deref
, base_offset
,
717 deref_offset
+ field_offset
,
718 row_major
, 1, packing
,
719 writemask_for_size(field_deref
->type
->vector_elements
));
721 field_offset
+= field
->type
->std140_size(row_major
);
726 if (deref
->type
->is_array()) {
727 unsigned array_stride
= packing
== GLSL_INTERFACE_PACKING_STD430
?
728 deref
->type
->fields
.array
->std430_array_stride(row_major
) :
729 glsl_align(deref
->type
->fields
.array
->std140_size(row_major
), 16);
731 for (unsigned i
= 0; i
< deref
->type
->length
; i
++) {
732 ir_constant
*element
= new(mem_ctx
) ir_constant(i
);
733 ir_dereference
*element_deref
=
734 new(mem_ctx
) ir_dereference_array(deref
->clone(mem_ctx
, NULL
),
736 emit_access(is_write
, element_deref
, base_offset
,
737 deref_offset
+ i
* array_stride
,
738 row_major
, 1, packing
,
739 writemask_for_size(element_deref
->type
->vector_elements
));
744 if (deref
->type
->is_matrix()) {
745 for (unsigned i
= 0; i
< deref
->type
->matrix_columns
; i
++) {
746 ir_constant
*col
= new(mem_ctx
) ir_constant(i
);
747 ir_dereference
*col_deref
=
748 new(mem_ctx
) ir_dereference_array(deref
->clone(mem_ctx
, NULL
), col
);
751 /* For a row-major matrix, the next column starts at the next
754 int size_mul
= deref
->type
->is_double() ? 8 : 4;
755 emit_access(is_write
, col_deref
, base_offset
,
756 deref_offset
+ i
* size_mul
,
757 row_major
, deref
->type
->matrix_columns
, packing
,
758 writemask_for_size(col_deref
->type
->vector_elements
));
762 /* std430 doesn't round up vec2 size to a vec4 size */
763 if (packing
== GLSL_INTERFACE_PACKING_STD430
&&
764 deref
->type
->vector_elements
== 2 &&
765 !deref
->type
->is_double()) {
768 /* std140 always rounds the stride of arrays (and matrices) to a
769 * vec4, so matrices are always 16 between columns/rows. With
770 * doubles, they will be 32 apart when there are more than 2 rows.
772 * For both std140 and std430, if the member is a
773 * three-'component vector with components consuming N basic
774 * machine units, the base alignment is 4N. For vec4, base
777 size_mul
= (deref
->type
->is_double() &&
778 deref
->type
->vector_elements
> 2) ? 32 : 16;
781 emit_access(is_write
, col_deref
, base_offset
,
782 deref_offset
+ i
* size_mul
,
783 row_major
, deref
->type
->matrix_columns
, packing
,
784 writemask_for_size(col_deref
->type
->vector_elements
));
790 assert(deref
->type
->is_scalar() || deref
->type
->is_vector());
794 add(base_offset
, new(mem_ctx
) ir_constant(deref_offset
));
796 base_ir
->insert_after(ssbo_store(deref
, offset
, write_mask
));
798 if (!this->is_shader_storage
) {
799 base_ir
->insert_before(assign(deref
->clone(mem_ctx
, NULL
),
800 ubo_load(deref
->type
, offset
)));
802 ir_call
*load_ssbo
= ssbo_load(deref
->type
, offset
);
803 base_ir
->insert_before(load_ssbo
);
804 ir_rvalue
*value
= load_ssbo
->return_deref
->as_rvalue()->clone(mem_ctx
, NULL
);
805 base_ir
->insert_before(assign(deref
->clone(mem_ctx
, NULL
), value
));
809 unsigned N
= deref
->type
->is_double() ? 8 : 4;
811 /* We're dereffing a column out of a row-major matrix, so we
812 * gather the vector from each stored row.
814 assert(deref
->type
->base_type
== GLSL_TYPE_FLOAT
||
815 deref
->type
->base_type
== GLSL_TYPE_DOUBLE
);
816 /* Matrices, row_major or not, are stored as if they were
817 * arrays of vectors of the appropriate size in std140.
818 * Arrays have their strides rounded up to a vec4, so the
819 * matrix stride is always 16. However a double matrix may either be 16
820 * or 32 depending on the number of columns.
822 assert(matrix_columns
<= 4);
823 unsigned matrix_stride
= 0;
824 /* Matrix stride for std430 mat2xY matrices are not rounded up to
825 * vec4 size. From OpenGL 4.3 spec, section 7.6.2.2 "Standard Uniform
828 * "2. If the member is a two- or four-component vector with components
829 * consuming N basic machine units, the base alignment is 2N or 4N,
830 * respectively." [...]
831 * "4. If the member is an array of scalars or vectors, the base alignment
832 * and array stride are set to match the base alignment of a single array
833 * element, according to rules (1), (2), and (3), and rounded up to the
834 * base alignment of a vec4." [...]
835 * "7. If the member is a row-major matrix with C columns and R rows, the
836 * matrix is stored identically to an array of R row vectors with C
837 * components each, according to rule (4)." [...]
838 * "When using the std430 storage layout, shader storage blocks will be
839 * laid out in buffer storage identically to uniform and shader storage
840 * blocks using the std140 layout, except that the base alignment and
841 * stride of arrays of scalars and vectors in rule 4 and of structures in
842 * rule 9 are not rounded up a multiple of the base alignment of a vec4."
844 if (packing
== GLSL_INTERFACE_PACKING_STD430
&& matrix_columns
== 2)
845 matrix_stride
= 2 * N
;
847 matrix_stride
= glsl_align(matrix_columns
* N
, 16);
849 const glsl_type
*deref_type
= deref
->type
->base_type
== GLSL_TYPE_FLOAT
?
850 glsl_type::float_type
: glsl_type::double_type
;
852 for (unsigned i
= 0; i
< deref
->type
->vector_elements
; i
++) {
853 ir_rvalue
*chan_offset
=
855 new(mem_ctx
) ir_constant(deref_offset
+ i
* matrix_stride
));
857 /* If the component is not in the writemask, then don't
860 if (!((1 << i
) & write_mask
))
863 base_ir
->insert_after(ssbo_store(swizzle(deref
, i
, 1), chan_offset
, 1));
865 if (!this->is_shader_storage
) {
866 base_ir
->insert_before(assign(deref
->clone(mem_ctx
, NULL
),
867 ubo_load(deref_type
, chan_offset
),
870 ir_call
*load_ssbo
= ssbo_load(deref_type
, chan_offset
);
871 base_ir
->insert_before(load_ssbo
);
872 ir_rvalue
*value
= load_ssbo
->return_deref
->as_rvalue()->clone(mem_ctx
, NULL
);
873 base_ir
->insert_before(assign(deref
->clone(mem_ctx
, NULL
),
883 lower_ubo_reference_visitor::write_to_memory(ir_dereference
*deref
,
885 ir_variable
*write_var
,
888 ir_rvalue
*offset
= NULL
;
889 unsigned const_offset
;
892 unsigned packing
= var
->get_interface_type()->interface_packing
;
894 /* Compute the offset to the start if the dereference as well as other
895 * information we need to configure the write
897 setup_for_load_or_store(var
, deref
,
898 &offset
, &const_offset
,
899 &row_major
, &matrix_columns
,
903 /* Now emit writes from the temporary to memory */
904 ir_variable
*write_offset
=
905 new(mem_ctx
) ir_variable(glsl_type::uint_type
,
906 "ssbo_store_temp_offset",
909 base_ir
->insert_before(write_offset
);
910 base_ir
->insert_before(assign(write_offset
, offset
));
912 deref
= new(mem_ctx
) ir_dereference_variable(write_var
);
913 emit_access(true, deref
, write_offset
, const_offset
,
914 row_major
, matrix_columns
, packing
, write_mask
);
918 lower_ubo_reference_visitor::visit_enter(ir_expression
*ir
)
920 check_ssbo_unsized_array_length_expression(ir
);
921 return rvalue_visit(ir
);
925 lower_ubo_reference_visitor::calculate_ssbo_unsized_array_length(ir_expression
*expr
)
927 if (expr
->operation
!=
928 ir_expression_operation(ir_unop_ssbo_unsized_array_length
))
931 ir_rvalue
*rvalue
= expr
->operands
[0]->as_rvalue();
933 !rvalue
->type
->is_array() || !rvalue
->type
->is_unsized_array())
936 ir_dereference
*deref
= expr
->operands
[0]->as_dereference();
940 ir_variable
*var
= expr
->operands
[0]->variable_referenced();
941 if (!var
|| !var
->is_in_shader_storage_block())
943 return process_ssbo_unsized_array_length(&rvalue
, deref
, var
);
947 lower_ubo_reference_visitor::check_ssbo_unsized_array_length_expression(ir_expression
*ir
)
950 ir_expression_operation(ir_unop_ssbo_unsized_array_length
)) {
951 /* Don't replace this unop if it is found alone. It is going to be
952 * removed by the optimization passes or replaced if it is part of
953 * an ir_assignment or another ir_expression.
958 for (unsigned i
= 0; i
< ir
->get_num_operands(); i
++) {
959 if (ir
->operands
[i
]->ir_type
!= ir_type_expression
)
961 ir_expression
*expr
= (ir_expression
*) ir
->operands
[i
];
962 ir_expression
*temp
= calculate_ssbo_unsized_array_length(expr
);
967 ir
->operands
[i
] = temp
;
972 lower_ubo_reference_visitor::check_ssbo_unsized_array_length_assignment(ir_assignment
*ir
)
974 if (!ir
->rhs
|| ir
->rhs
->ir_type
!= ir_type_expression
)
977 ir_expression
*expr
= (ir_expression
*) ir
->rhs
;
978 ir_expression
*temp
= calculate_ssbo_unsized_array_length(expr
);
988 lower_ubo_reference_visitor::emit_ssbo_get_buffer_size()
990 ir_rvalue
*block_ref
= this->uniform_block
->clone(mem_ctx
, NULL
);
991 return new(mem_ctx
) ir_expression(ir_unop_get_buffer_size
,
997 lower_ubo_reference_visitor::calculate_unsized_array_stride(ir_dereference
*deref
,
1000 unsigned array_stride
= 0;
1002 switch (deref
->ir_type
) {
1003 case ir_type_dereference_variable
:
1005 ir_dereference_variable
*deref_var
= (ir_dereference_variable
*)deref
;
1006 const struct glsl_type
*unsized_array_type
= NULL
;
1007 /* An unsized array can be sized by other lowering passes, so pick
1008 * the first field of the array which has the data type of the unsized
1011 unsized_array_type
= deref_var
->var
->type
->fields
.array
;
1013 /* Whether or not the field is row-major (because it might be a
1014 * bvec2 or something) does not affect the array itself. We need
1015 * to know whether an array element in its entirety is row-major.
1017 const bool array_row_major
=
1018 is_dereferenced_thing_row_major(deref_var
);
1020 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
1021 array_stride
= unsized_array_type
->std430_array_stride(array_row_major
);
1023 array_stride
= unsized_array_type
->std140_size(array_row_major
);
1024 array_stride
= glsl_align(array_stride
, 16);
1028 case ir_type_dereference_record
:
1030 ir_dereference_record
*deref_record
= (ir_dereference_record
*) deref
;
1031 ir_dereference
*interface_deref
=
1032 deref_record
->record
->as_dereference();
1033 assert(interface_deref
!= NULL
);
1034 const struct glsl_type
*interface_type
= interface_deref
->type
;
1035 unsigned record_length
= interface_type
->length
;
1036 /* Unsized array is always the last element of the interface */
1037 const struct glsl_type
*unsized_array_type
=
1038 interface_type
->fields
.structure
[record_length
- 1].type
->fields
.array
;
1040 const bool array_row_major
=
1041 is_dereferenced_thing_row_major(deref_record
);
1043 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
1044 array_stride
= unsized_array_type
->std430_array_stride(array_row_major
);
1046 array_stride
= unsized_array_type
->std140_size(array_row_major
);
1047 array_stride
= glsl_align(array_stride
, 16);
1052 unreachable("Unsupported dereference type");
1054 return array_stride
;
1058 lower_ubo_reference_visitor::process_ssbo_unsized_array_length(ir_rvalue
**rvalue
,
1059 ir_dereference
*deref
,
1062 mem_ctx
= ralloc_parent(*rvalue
);
1064 ir_rvalue
*base_offset
= NULL
;
1065 unsigned const_offset
;
1068 unsigned packing
= var
->get_interface_type()->interface_packing
;
1069 int unsized_array_stride
= calculate_unsized_array_stride(deref
, packing
);
1071 /* Compute the offset to the start if the dereference as well as other
1072 * information we need to calculate the length.
1074 setup_for_load_or_store(var
, deref
,
1075 &base_offset
, &const_offset
,
1076 &row_major
, &matrix_columns
,
1079 * max((buffer_object_size - offset_of_array) / stride_of_array, 0)
1081 ir_expression
*buffer_size
= emit_ssbo_get_buffer_size();
1083 ir_expression
*offset_of_array
= new(mem_ctx
)
1084 ir_expression(ir_binop_add
, base_offset
,
1085 new(mem_ctx
) ir_constant(const_offset
));
1086 ir_expression
*offset_of_array_int
= new(mem_ctx
)
1087 ir_expression(ir_unop_u2i
, offset_of_array
);
1089 ir_expression
*sub
= new(mem_ctx
)
1090 ir_expression(ir_binop_sub
, buffer_size
, offset_of_array_int
);
1091 ir_expression
*div
= new(mem_ctx
)
1092 ir_expression(ir_binop_div
, sub
,
1093 new(mem_ctx
) ir_constant(unsized_array_stride
));
1094 ir_expression
*max
= new(mem_ctx
)
1095 ir_expression(ir_binop_max
, div
, new(mem_ctx
) ir_constant(0));
1101 lower_ubo_reference_visitor::check_for_ssbo_store(ir_assignment
*ir
)
1103 if (!ir
|| !ir
->lhs
)
1106 ir_rvalue
*rvalue
= ir
->lhs
->as_rvalue();
1110 ir_dereference
*deref
= ir
->lhs
->as_dereference();
1114 ir_variable
*var
= ir
->lhs
->variable_referenced();
1115 if (!var
|| !var
->is_in_buffer_block())
1118 /* We have a write to a buffer variable, so declare a temporary and rewrite
1119 * the assignment so that the temporary is the LHS.
1121 mem_ctx
= ralloc_parent(shader
->ir
);
1123 const glsl_type
*type
= rvalue
->type
;
1124 ir_variable
*write_var
= new(mem_ctx
) ir_variable(type
,
1127 base_ir
->insert_before(write_var
);
1128 ir
->lhs
= new(mem_ctx
) ir_dereference_variable(write_var
);
1130 /* Now we have to write the value assigned to the temporary back to memory */
1131 write_to_memory(deref
, var
, write_var
, ir
->write_mask
);
1137 lower_ubo_reference_visitor::visit_enter(ir_assignment
*ir
)
1139 check_ssbo_unsized_array_length_assignment(ir
);
1140 check_for_ssbo_store(ir
);
1141 return rvalue_visit(ir
);
1144 /* Lowers the intrinsic call to a new internal intrinsic that swaps the
1145 * access to the buffer variable in the first parameter by an offset
1146 * and block index. This involves creating the new internal intrinsic
1147 * (i.e. the new function signature).
1150 lower_ubo_reference_visitor::lower_ssbo_atomic_intrinsic(ir_call
*ir
)
1152 /* SSBO atomics usually have 2 parameters, the buffer variable and an
1153 * integer argument. The exception is CompSwap, that has an additional
1154 * integer parameter.
1156 int param_count
= ir
->actual_parameters
.length();
1157 assert(param_count
== 2 || param_count
== 3);
1159 /* First argument must be a scalar integer buffer variable */
1160 exec_node
*param
= ir
->actual_parameters
.get_head();
1161 ir_instruction
*inst
= (ir_instruction
*) param
;
1162 assert(inst
->ir_type
== ir_type_dereference_variable
||
1163 inst
->ir_type
== ir_type_dereference_array
||
1164 inst
->ir_type
== ir_type_dereference_record
||
1165 inst
->ir_type
== ir_type_swizzle
);
1167 ir_rvalue
*deref
= (ir_rvalue
*) inst
;
1168 assert(deref
->type
->is_scalar() && deref
->type
->is_integer());
1170 ir_variable
*var
= deref
->variable_referenced();
1173 /* Compute the offset to the start if the dereference and the
1176 mem_ctx
= ralloc_parent(shader
->ir
);
1178 ir_rvalue
*offset
= NULL
;
1179 unsigned const_offset
;
1182 unsigned packing
= var
->get_interface_type()->interface_packing
;
1184 setup_for_load_or_store(var
, deref
,
1185 &offset
, &const_offset
,
1186 &row_major
, &matrix_columns
,
1190 assert(matrix_columns
== 1);
1192 ir_rvalue
*deref_offset
=
1193 add(offset
, new(mem_ctx
) ir_constant(const_offset
));
1194 ir_rvalue
*block_index
= this->uniform_block
->clone(mem_ctx
, NULL
);
1196 /* Create the new internal function signature that will take a block
1197 * index and offset instead of a buffer variable
1199 exec_list sig_params
;
1200 ir_variable
*sig_param
= new(mem_ctx
)
1201 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
1202 sig_params
.push_tail(sig_param
);
1204 sig_param
= new(mem_ctx
)
1205 ir_variable(glsl_type::uint_type
, "offset" , ir_var_function_in
);
1206 sig_params
.push_tail(sig_param
);
1208 const glsl_type
*type
= deref
->type
->base_type
== GLSL_TYPE_INT
?
1209 glsl_type::int_type
: glsl_type::uint_type
;
1210 sig_param
= new(mem_ctx
)
1211 ir_variable(type
, "data1", ir_var_function_in
);
1212 sig_params
.push_tail(sig_param
);
1214 if (param_count
== 3) {
1215 sig_param
= new(mem_ctx
)
1216 ir_variable(type
, "data2", ir_var_function_in
);
1217 sig_params
.push_tail(sig_param
);
1220 ir_function_signature
*sig
=
1221 new(mem_ctx
) ir_function_signature(deref
->type
,
1222 shader_storage_buffer_object
);
1224 sig
->replace_parameters(&sig_params
);
1225 sig
->is_intrinsic
= true;
1228 sprintf(func_name
, "%s_internal", ir
->callee_name());
1229 ir_function
*f
= new(mem_ctx
) ir_function(func_name
);
1230 f
->add_signature(sig
);
1232 /* Now, create the call to the internal intrinsic */
1233 exec_list call_params
;
1234 call_params
.push_tail(block_index
);
1235 call_params
.push_tail(deref_offset
);
1236 param
= ir
->actual_parameters
.get_head()->get_next();
1237 ir_rvalue
*param_as_rvalue
= ((ir_instruction
*) param
)->as_rvalue();
1238 call_params
.push_tail(param_as_rvalue
->clone(mem_ctx
, NULL
));
1239 if (param_count
== 3) {
1240 param
= param
->get_next();
1241 param_as_rvalue
= ((ir_instruction
*) param
)->as_rvalue();
1242 call_params
.push_tail(param_as_rvalue
->clone(mem_ctx
, NULL
));
1244 ir_dereference_variable
*return_deref
=
1245 ir
->return_deref
->clone(mem_ctx
, NULL
);
1246 return new(mem_ctx
) ir_call(sig
, return_deref
, &call_params
);
1250 lower_ubo_reference_visitor::check_for_ssbo_atomic_intrinsic(ir_call
*ir
)
1252 const char *callee
= ir
->callee_name();
1253 if (!strcmp("__intrinsic_ssbo_atomic_add", callee
) ||
1254 !strcmp("__intrinsic_ssbo_atomic_min", callee
) ||
1255 !strcmp("__intrinsic_ssbo_atomic_max", callee
) ||
1256 !strcmp("__intrinsic_ssbo_atomic_and", callee
) ||
1257 !strcmp("__intrinsic_ssbo_atomic_or", callee
) ||
1258 !strcmp("__intrinsic_ssbo_atomic_xor", callee
) ||
1259 !strcmp("__intrinsic_ssbo_atomic_exchange", callee
) ||
1260 !strcmp("__intrinsic_ssbo_atomic_comp_swap", callee
)) {
1261 return lower_ssbo_atomic_intrinsic(ir
);
1269 lower_ubo_reference_visitor::visit_enter(ir_call
*ir
)
1271 ir_call
*new_ir
= check_for_ssbo_atomic_intrinsic(ir
);
1274 base_ir
->replace_with(new_ir
);
1275 return visit_continue_with_parent
;
1278 return rvalue_visit(ir
);
1282 } /* unnamed namespace */
1285 lower_ubo_reference(struct gl_shader
*shader
)
1287 lower_ubo_reference_visitor
v(shader
);
1289 /* Loop over the instructions lowering references, because we take
1290 * a deref of a UBO array using a UBO dereference as the index will
1291 * produce a collection of instructions all of which have cloned
1292 * UBO dereferences for that array index.
1296 visit_list_elements(&v
, shader
->ir
);
1297 } while (v
.progress
);