/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file lower_ubo_reference.cpp
 *
 * IR lower pass to replace dereferences of variables in a uniform
 * buffer object with usage of ir_binop_ubo_load expressions, each of
 * which can read data up to the size of a vec4.
 *
 * This relieves drivers of the responsibility to deal with tricky UBO
 * layout issues like std140 structures and row_major matrices on
 * their own.
 */
36 #include "lower_buffer_access.h"
37 #include "ir_builder.h"
38 #include "main/macros.h"
39 #include "glsl_parser_extras.h"
41 using namespace ir_builder
;
44 class lower_ubo_reference_visitor
:
45 public lower_buffer_access::lower_buffer_access
{
47 lower_ubo_reference_visitor(struct gl_shader
*shader
)
52 void handle_rvalue(ir_rvalue
**rvalue
);
53 ir_visitor_status
visit_enter(ir_assignment
*ir
);
55 void setup_for_load_or_store(ir_variable
*var
,
58 unsigned *const_offset
,
62 ir_expression
*ubo_load(const struct glsl_type
*type
,
64 ir_call
*ssbo_load(const struct glsl_type
*type
,
67 bool check_for_buffer_array_copy(ir_assignment
*ir
);
68 bool check_for_buffer_struct_copy(ir_assignment
*ir
);
69 void check_for_ssbo_store(ir_assignment
*ir
);
70 void write_to_memory(ir_dereference
*deref
,
72 ir_variable
*write_var
,
74 ir_call
*ssbo_store(ir_rvalue
*deref
, ir_rvalue
*offset
,
81 ssbo_unsized_array_length_access
,
85 void insert_buffer_access(void *mem_ctx
, ir_dereference
*deref
,
86 const glsl_type
*type
, ir_rvalue
*offset
,
87 unsigned mask
, int channel
);
89 ir_visitor_status
visit_enter(class ir_expression
*);
90 ir_expression
*calculate_ssbo_unsized_array_length(ir_expression
*expr
);
91 void check_ssbo_unsized_array_length_expression(class ir_expression
*);
92 void check_ssbo_unsized_array_length_assignment(ir_assignment
*ir
);
94 ir_expression
*process_ssbo_unsized_array_length(ir_rvalue
**,
97 ir_expression
*emit_ssbo_get_buffer_size();
99 unsigned calculate_unsized_array_stride(ir_dereference
*deref
,
102 ir_call
*lower_ssbo_atomic_intrinsic(ir_call
*ir
);
103 ir_call
*check_for_ssbo_atomic_intrinsic(ir_call
*ir
);
104 ir_visitor_status
visit_enter(ir_call
*ir
);
107 struct gl_shader
*shader
;
108 struct gl_uniform_buffer_variable
*ubo_var
;
109 ir_rvalue
*uniform_block
;
114 * Determine the name of the interface block field
116 * This is the name of the specific member as it would appear in the
117 * \c gl_uniform_buffer_variable::Name field in the shader's
118 * \c UniformBlocks array.
121 interface_field_name(void *mem_ctx
, char *base_name
, ir_rvalue
*d
,
122 ir_rvalue
**nonconst_block_index
)
124 *nonconst_block_index
= NULL
;
125 char *name_copy
= NULL
;
126 size_t base_length
= 0;
128 /* Loop back through the IR until we find the uniform block */
131 switch (ir
->ir_type
) {
132 case ir_type_dereference_variable
: {
138 case ir_type_dereference_record
: {
139 ir_dereference_record
*r
= (ir_dereference_record
*) ir
;
140 ir
= r
->record
->as_dereference();
142 /* If we got here it means any previous array subscripts belong to
143 * block members and not the block itself so skip over them in the
150 case ir_type_dereference_array
: {
151 ir_dereference_array
*a
= (ir_dereference_array
*) ir
;
152 ir
= a
->array
->as_dereference();
156 case ir_type_swizzle
: {
157 ir_swizzle
*s
= (ir_swizzle
*) ir
;
158 ir
= s
->val
->as_dereference();
159 /* Skip swizzle in the next pass */
165 assert(!"Should not get here.");
171 switch (d
->ir_type
) {
172 case ir_type_dereference_variable
: {
173 ir_dereference_variable
*v
= (ir_dereference_variable
*) d
;
174 if (name_copy
!= NULL
&&
175 v
->var
->is_interface_instance() &&
176 v
->var
->type
->is_array()) {
179 *nonconst_block_index
= NULL
;
186 case ir_type_dereference_array
: {
187 ir_dereference_array
*a
= (ir_dereference_array
*) d
;
190 if (name_copy
== NULL
) {
191 name_copy
= ralloc_strdup(mem_ctx
, base_name
);
192 base_length
= strlen(name_copy
);
195 /* For arrays of arrays we start at the innermost array and work our
196 * way out so we need to insert the subscript at the base of the
197 * name string rather than just attaching it to the end.
199 new_length
= base_length
;
200 ir_constant
*const_index
= a
->array_index
->as_constant();
201 char *end
= ralloc_strdup(NULL
, &name_copy
[new_length
]);
203 ir_rvalue
*array_index
= a
->array_index
;
204 if (array_index
->type
!= glsl_type::uint_type
)
205 array_index
= i2u(array_index
);
207 if (a
->array
->type
->is_array() &&
208 a
->array
->type
->fields
.array
->is_array()) {
209 ir_constant
*base_size
= new(mem_ctx
)
210 ir_constant(a
->array
->type
->fields
.array
->arrays_of_arrays_size());
211 array_index
= mul(array_index
, base_size
);
214 if (*nonconst_block_index
) {
215 *nonconst_block_index
= add(*nonconst_block_index
, array_index
);
217 *nonconst_block_index
= array_index
;
220 ralloc_asprintf_rewrite_tail(&name_copy
, &new_length
, "[0]%s",
223 ralloc_asprintf_rewrite_tail(&name_copy
, &new_length
, "[%d]%s",
224 const_index
->get_uint_component(0),
229 d
= a
->array
->as_dereference();
235 assert(!"Should not get here.");
240 assert(!"Should not get here.");
245 lower_ubo_reference_visitor::setup_for_load_or_store(ir_variable
*var
,
248 unsigned *const_offset
,
253 /* Determine the name of the interface block */
254 ir_rvalue
*nonconst_block_index
;
255 const char *const field_name
=
256 interface_field_name(mem_ctx
, (char *) var
->get_interface_type()->name
,
257 deref
, &nonconst_block_index
);
259 /* Locate the block by interface name */
261 struct gl_uniform_block
**blocks
;
262 if (this->buffer_access_type
!= ubo_load_access
) {
263 num_blocks
= shader
->NumShaderStorageBlocks
;
264 blocks
= shader
->ShaderStorageBlocks
;
266 num_blocks
= shader
->NumUniformBlocks
;
267 blocks
= shader
->UniformBlocks
;
269 this->uniform_block
= NULL
;
270 for (unsigned i
= 0; i
< num_blocks
; i
++) {
271 if (strcmp(field_name
, blocks
[i
]->Name
) == 0) {
273 ir_constant
*index
= new(mem_ctx
) ir_constant(i
);
275 if (nonconst_block_index
) {
276 this->uniform_block
= add(nonconst_block_index
, index
);
278 this->uniform_block
= index
;
281 this->ubo_var
= var
->is_interface_instance()
282 ? &blocks
[i
]->Uniforms
[0] : &blocks
[i
]->Uniforms
[var
->data
.location
];
288 assert(this->uniform_block
);
290 *offset
= new(mem_ctx
) ir_constant(0u);
292 *row_major
= is_dereferenced_thing_row_major(deref
);
295 /* Calculate the offset to the start of the region of the UBO
296 * dereferenced by *rvalue. This may be a variable offset if an
297 * array dereference has a variable index.
300 switch (deref
->ir_type
) {
301 case ir_type_dereference_variable
: {
302 *const_offset
+= ubo_var
->Offset
;
307 case ir_type_dereference_array
: {
308 ir_dereference_array
*deref_array
= (ir_dereference_array
*) deref
;
309 unsigned array_stride
;
310 if (deref_array
->array
->type
->is_vector()) {
311 /* We get this when storing or loading a component out of a vector
312 * with a non-constant index. This happens for v[i] = f where v is
313 * a vector (or m[i][j] = f where m is a matrix). If we don't
314 * lower that here, it gets turned into v = vector_insert(v, i,
315 * f), which loads the entire vector, modifies one component and
316 * then write the entire thing back. That breaks if another
317 * thread or SIMD channel is modifying the same vector.
320 if (deref_array
->array
->type
->is_double())
322 } else if (deref_array
->array
->type
->is_matrix() && *row_major
) {
323 /* When loading a vector out of a row major matrix, the
324 * step between the columns (vectors) is the size of a
325 * float, while the step between the rows (elements of a
326 * vector) is handled below in emit_ubo_loads.
329 if (deref_array
->array
->type
->is_double())
331 *matrix_columns
= deref_array
->array
->type
->matrix_columns
;
332 } else if (deref_array
->type
->without_array()->is_interface()) {
333 /* We're processing an array dereference of an interface instance
334 * array. The thing being dereferenced *must* be a variable
335 * dereference because interfaces cannot be embedded in other
336 * types. In terms of calculating the offsets for the lowering
337 * pass, we don't care about the array index. All elements of an
338 * interface instance array will have the same offsets relative to
339 * the base of the block that backs them.
341 deref
= deref_array
->array
->as_dereference();
344 /* Whether or not the field is row-major (because it might be a
345 * bvec2 or something) does not affect the array itself. We need
346 * to know whether an array element in its entirety is row-major.
348 const bool array_row_major
=
349 is_dereferenced_thing_row_major(deref_array
);
351 /* The array type will give the correct interface packing
354 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
355 array_stride
= deref_array
->type
->std430_array_stride(array_row_major
);
357 array_stride
= deref_array
->type
->std140_size(array_row_major
);
358 array_stride
= glsl_align(array_stride
, 16);
362 ir_rvalue
*array_index
= deref_array
->array_index
;
363 if (array_index
->type
->base_type
== GLSL_TYPE_INT
)
364 array_index
= i2u(array_index
);
366 ir_constant
*const_index
=
367 array_index
->constant_expression_value(NULL
);
369 *const_offset
+= array_stride
* const_index
->value
.u
[0];
371 *offset
= add(*offset
,
373 new(mem_ctx
) ir_constant(array_stride
)));
375 deref
= deref_array
->array
->as_dereference();
379 case ir_type_dereference_record
: {
380 ir_dereference_record
*deref_record
= (ir_dereference_record
*) deref
;
381 const glsl_type
*struct_type
= deref_record
->record
->type
;
382 unsigned intra_struct_offset
= 0;
384 for (unsigned int i
= 0; i
< struct_type
->length
; i
++) {
385 const glsl_type
*type
= struct_type
->fields
.structure
[i
].type
;
387 ir_dereference_record
*field_deref
= new(mem_ctx
)
388 ir_dereference_record(deref_record
->record
,
389 struct_type
->fields
.structure
[i
].name
);
390 const bool field_row_major
=
391 is_dereferenced_thing_row_major(field_deref
);
393 ralloc_free(field_deref
);
395 unsigned field_align
= 0;
397 if (packing
== GLSL_INTERFACE_PACKING_STD430
)
398 field_align
= type
->std430_base_alignment(field_row_major
);
400 field_align
= type
->std140_base_alignment(field_row_major
);
402 intra_struct_offset
= glsl_align(intra_struct_offset
, field_align
);
404 if (strcmp(struct_type
->fields
.structure
[i
].name
,
405 deref_record
->field
) == 0)
408 if (packing
== GLSL_INTERFACE_PACKING_STD430
)
409 intra_struct_offset
+= type
->std430_size(field_row_major
);
411 intra_struct_offset
+= type
->std140_size(field_row_major
);
413 /* If the field just examined was itself a structure, apply rule
416 * "The structure may have padding at the end; the base offset
417 * of the member following the sub-structure is rounded up to
418 * the next multiple of the base alignment of the structure."
420 if (type
->without_array()->is_record()) {
421 intra_struct_offset
= glsl_align(intra_struct_offset
,
427 *const_offset
+= intra_struct_offset
;
428 deref
= deref_record
->record
->as_dereference();
432 case ir_type_swizzle
: {
433 ir_swizzle
*deref_swizzle
= (ir_swizzle
*) deref
;
435 assert(deref_swizzle
->mask
.num_components
== 1);
437 *const_offset
+= deref_swizzle
->mask
.x
* sizeof(int);
438 deref
= deref_swizzle
->val
->as_dereference();
443 assert(!"not reached");
451 lower_ubo_reference_visitor::handle_rvalue(ir_rvalue
**rvalue
)
456 ir_dereference
*deref
= (*rvalue
)->as_dereference();
460 ir_variable
*var
= deref
->variable_referenced();
461 if (!var
|| !var
->is_in_buffer_block())
464 mem_ctx
= ralloc_parent(shader
->ir
);
466 ir_rvalue
*offset
= NULL
;
467 unsigned const_offset
;
470 unsigned packing
= var
->get_interface_type()->interface_packing
;
472 this->buffer_access_type
=
473 var
->is_in_shader_storage_block() ?
474 ssbo_load_access
: ubo_load_access
;
476 /* Compute the offset to the start if the dereference as well as other
477 * information we need to configure the write
479 setup_for_load_or_store(var
, deref
,
480 &offset
, &const_offset
,
481 &row_major
, &matrix_columns
,
485 /* Now that we've calculated the offset to the start of the
486 * dereference, walk over the type and emit loads into a temporary.
488 const glsl_type
*type
= (*rvalue
)->type
;
489 ir_variable
*load_var
= new(mem_ctx
) ir_variable(type
,
492 base_ir
->insert_before(load_var
);
494 ir_variable
*load_offset
= new(mem_ctx
) ir_variable(glsl_type::uint_type
,
495 "ubo_load_temp_offset",
497 base_ir
->insert_before(load_offset
);
498 base_ir
->insert_before(assign(load_offset
, offset
));
500 deref
= new(mem_ctx
) ir_dereference_variable(load_var
);
501 emit_access(mem_ctx
, false, deref
, load_offset
, const_offset
,
502 row_major
, matrix_columns
, packing
, 0);
509 lower_ubo_reference_visitor::ubo_load(const glsl_type
*type
,
512 ir_rvalue
*block_ref
= this->uniform_block
->clone(mem_ctx
, NULL
);
514 ir_expression(ir_binop_ubo_load
,
522 shader_storage_buffer_object(const _mesa_glsl_parse_state
*state
)
524 return state
->ARB_shader_storage_buffer_object_enable
;
528 lower_ubo_reference_visitor::ssbo_store(ir_rvalue
*deref
,
532 exec_list sig_params
;
534 ir_variable
*block_ref
= new(mem_ctx
)
535 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
536 sig_params
.push_tail(block_ref
);
538 ir_variable
*offset_ref
= new(mem_ctx
)
539 ir_variable(glsl_type::uint_type
, "offset" , ir_var_function_in
);
540 sig_params
.push_tail(offset_ref
);
542 ir_variable
*val_ref
= new(mem_ctx
)
543 ir_variable(deref
->type
, "value" , ir_var_function_in
);
544 sig_params
.push_tail(val_ref
);
546 ir_variable
*writemask_ref
= new(mem_ctx
)
547 ir_variable(glsl_type::uint_type
, "write_mask" , ir_var_function_in
);
548 sig_params
.push_tail(writemask_ref
);
550 ir_function_signature
*sig
= new(mem_ctx
)
551 ir_function_signature(glsl_type::void_type
, shader_storage_buffer_object
);
553 sig
->replace_parameters(&sig_params
);
554 sig
->is_intrinsic
= true;
556 ir_function
*f
= new(mem_ctx
) ir_function("__intrinsic_store_ssbo");
557 f
->add_signature(sig
);
559 exec_list call_params
;
560 call_params
.push_tail(this->uniform_block
->clone(mem_ctx
, NULL
));
561 call_params
.push_tail(offset
->clone(mem_ctx
, NULL
));
562 call_params
.push_tail(deref
->clone(mem_ctx
, NULL
));
563 call_params
.push_tail(new(mem_ctx
) ir_constant(write_mask
));
564 return new(mem_ctx
) ir_call(sig
, NULL
, &call_params
);
568 lower_ubo_reference_visitor::ssbo_load(const struct glsl_type
*type
,
571 exec_list sig_params
;
573 ir_variable
*block_ref
= new(mem_ctx
)
574 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
575 sig_params
.push_tail(block_ref
);
577 ir_variable
*offset_ref
= new(mem_ctx
)
578 ir_variable(glsl_type::uint_type
, "offset_ref" , ir_var_function_in
);
579 sig_params
.push_tail(offset_ref
);
581 ir_function_signature
*sig
=
582 new(mem_ctx
) ir_function_signature(type
, shader_storage_buffer_object
);
584 sig
->replace_parameters(&sig_params
);
585 sig
->is_intrinsic
= true;
587 ir_function
*f
= new(mem_ctx
) ir_function("__intrinsic_load_ssbo");
588 f
->add_signature(sig
);
590 ir_variable
*result
= new(mem_ctx
)
591 ir_variable(type
, "ssbo_load_result", ir_var_temporary
);
592 base_ir
->insert_before(result
);
593 ir_dereference_variable
*deref_result
= new(mem_ctx
)
594 ir_dereference_variable(result
);
596 exec_list call_params
;
597 call_params
.push_tail(this->uniform_block
->clone(mem_ctx
, NULL
));
598 call_params
.push_tail(offset
->clone(mem_ctx
, NULL
));
600 return new(mem_ctx
) ir_call(sig
, deref_result
, &call_params
);
604 lower_ubo_reference_visitor::insert_buffer_access(void *mem_ctx
,
605 ir_dereference
*deref
,
606 const glsl_type
*type
,
611 switch (this->buffer_access_type
) {
612 case ubo_load_access
:
613 base_ir
->insert_before(assign(deref
->clone(mem_ctx
, NULL
),
614 ubo_load(type
, offset
),
617 case ssbo_load_access
: {
618 ir_call
*load_ssbo
= ssbo_load(type
, offset
);
619 base_ir
->insert_before(load_ssbo
);
620 ir_rvalue
*value
= load_ssbo
->return_deref
->as_rvalue()->clone(mem_ctx
, NULL
);
621 ir_assignment
*assignment
=
622 assign(deref
->clone(mem_ctx
, NULL
), value
, mask
);
623 base_ir
->insert_before(assignment
);
626 case ssbo_store_access
:
628 base_ir
->insert_after(ssbo_store(swizzle(deref
, channel
, 1),
631 base_ir
->insert_after(ssbo_store(deref
, offset
, mask
));
635 unreachable("invalid buffer_access_type in insert_buffer_access");
640 lower_ubo_reference_visitor::write_to_memory(ir_dereference
*deref
,
642 ir_variable
*write_var
,
645 ir_rvalue
*offset
= NULL
;
646 unsigned const_offset
;
649 unsigned packing
= var
->get_interface_type()->interface_packing
;
651 this->buffer_access_type
= ssbo_store_access
;
653 /* Compute the offset to the start if the dereference as well as other
654 * information we need to configure the write
656 setup_for_load_or_store(var
, deref
,
657 &offset
, &const_offset
,
658 &row_major
, &matrix_columns
,
662 /* Now emit writes from the temporary to memory */
663 ir_variable
*write_offset
=
664 new(mem_ctx
) ir_variable(glsl_type::uint_type
,
665 "ssbo_store_temp_offset",
668 base_ir
->insert_before(write_offset
);
669 base_ir
->insert_before(assign(write_offset
, offset
));
671 deref
= new(mem_ctx
) ir_dereference_variable(write_var
);
672 emit_access(mem_ctx
, true, deref
, write_offset
, const_offset
,
673 row_major
, matrix_columns
, packing
, write_mask
);
677 lower_ubo_reference_visitor::visit_enter(ir_expression
*ir
)
679 check_ssbo_unsized_array_length_expression(ir
);
680 return rvalue_visit(ir
);
684 lower_ubo_reference_visitor::calculate_ssbo_unsized_array_length(ir_expression
*expr
)
686 if (expr
->operation
!=
687 ir_expression_operation(ir_unop_ssbo_unsized_array_length
))
690 ir_rvalue
*rvalue
= expr
->operands
[0]->as_rvalue();
692 !rvalue
->type
->is_array() || !rvalue
->type
->is_unsized_array())
695 ir_dereference
*deref
= expr
->operands
[0]->as_dereference();
699 ir_variable
*var
= expr
->operands
[0]->variable_referenced();
700 if (!var
|| !var
->is_in_shader_storage_block())
702 return process_ssbo_unsized_array_length(&rvalue
, deref
, var
);
706 lower_ubo_reference_visitor::check_ssbo_unsized_array_length_expression(ir_expression
*ir
)
709 ir_expression_operation(ir_unop_ssbo_unsized_array_length
)) {
710 /* Don't replace this unop if it is found alone. It is going to be
711 * removed by the optimization passes or replaced if it is part of
712 * an ir_assignment or another ir_expression.
717 for (unsigned i
= 0; i
< ir
->get_num_operands(); i
++) {
718 if (ir
->operands
[i
]->ir_type
!= ir_type_expression
)
720 ir_expression
*expr
= (ir_expression
*) ir
->operands
[i
];
721 ir_expression
*temp
= calculate_ssbo_unsized_array_length(expr
);
726 ir
->operands
[i
] = temp
;
731 lower_ubo_reference_visitor::check_ssbo_unsized_array_length_assignment(ir_assignment
*ir
)
733 if (!ir
->rhs
|| ir
->rhs
->ir_type
!= ir_type_expression
)
736 ir_expression
*expr
= (ir_expression
*) ir
->rhs
;
737 ir_expression
*temp
= calculate_ssbo_unsized_array_length(expr
);
747 lower_ubo_reference_visitor::emit_ssbo_get_buffer_size()
749 ir_rvalue
*block_ref
= this->uniform_block
->clone(mem_ctx
, NULL
);
750 return new(mem_ctx
) ir_expression(ir_unop_get_buffer_size
,
756 lower_ubo_reference_visitor::calculate_unsized_array_stride(ir_dereference
*deref
,
759 unsigned array_stride
= 0;
761 switch (deref
->ir_type
) {
762 case ir_type_dereference_variable
:
764 ir_dereference_variable
*deref_var
= (ir_dereference_variable
*)deref
;
765 const struct glsl_type
*unsized_array_type
= NULL
;
766 /* An unsized array can be sized by other lowering passes, so pick
767 * the first field of the array which has the data type of the unsized
770 unsized_array_type
= deref_var
->var
->type
->fields
.array
;
772 /* Whether or not the field is row-major (because it might be a
773 * bvec2 or something) does not affect the array itself. We need
774 * to know whether an array element in its entirety is row-major.
776 const bool array_row_major
=
777 is_dereferenced_thing_row_major(deref_var
);
779 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
780 array_stride
= unsized_array_type
->std430_array_stride(array_row_major
);
782 array_stride
= unsized_array_type
->std140_size(array_row_major
);
783 array_stride
= glsl_align(array_stride
, 16);
787 case ir_type_dereference_record
:
789 ir_dereference_record
*deref_record
= (ir_dereference_record
*) deref
;
790 ir_dereference
*interface_deref
=
791 deref_record
->record
->as_dereference();
792 assert(interface_deref
!= NULL
);
793 const struct glsl_type
*interface_type
= interface_deref
->type
;
794 unsigned record_length
= interface_type
->length
;
795 /* Unsized array is always the last element of the interface */
796 const struct glsl_type
*unsized_array_type
=
797 interface_type
->fields
.structure
[record_length
- 1].type
->fields
.array
;
799 const bool array_row_major
=
800 is_dereferenced_thing_row_major(deref_record
);
802 if (packing
== GLSL_INTERFACE_PACKING_STD430
) {
803 array_stride
= unsized_array_type
->std430_array_stride(array_row_major
);
805 array_stride
= unsized_array_type
->std140_size(array_row_major
);
806 array_stride
= glsl_align(array_stride
, 16);
811 unreachable("Unsupported dereference type");
817 lower_ubo_reference_visitor::process_ssbo_unsized_array_length(ir_rvalue
**rvalue
,
818 ir_dereference
*deref
,
821 mem_ctx
= ralloc_parent(*rvalue
);
823 ir_rvalue
*base_offset
= NULL
;
824 unsigned const_offset
;
827 unsigned packing
= var
->get_interface_type()->interface_packing
;
828 int unsized_array_stride
= calculate_unsized_array_stride(deref
, packing
);
830 this->buffer_access_type
= ssbo_unsized_array_length_access
;
832 /* Compute the offset to the start if the dereference as well as other
833 * information we need to calculate the length.
835 setup_for_load_or_store(var
, deref
,
836 &base_offset
, &const_offset
,
837 &row_major
, &matrix_columns
,
840 * max((buffer_object_size - offset_of_array) / stride_of_array, 0)
842 ir_expression
*buffer_size
= emit_ssbo_get_buffer_size();
844 ir_expression
*offset_of_array
= new(mem_ctx
)
845 ir_expression(ir_binop_add
, base_offset
,
846 new(mem_ctx
) ir_constant(const_offset
));
847 ir_expression
*offset_of_array_int
= new(mem_ctx
)
848 ir_expression(ir_unop_u2i
, offset_of_array
);
850 ir_expression
*sub
= new(mem_ctx
)
851 ir_expression(ir_binop_sub
, buffer_size
, offset_of_array_int
);
852 ir_expression
*div
= new(mem_ctx
)
853 ir_expression(ir_binop_div
, sub
,
854 new(mem_ctx
) ir_constant(unsized_array_stride
));
855 ir_expression
*max
= new(mem_ctx
)
856 ir_expression(ir_binop_max
, div
, new(mem_ctx
) ir_constant(0));
862 lower_ubo_reference_visitor::check_for_ssbo_store(ir_assignment
*ir
)
867 ir_rvalue
*rvalue
= ir
->lhs
->as_rvalue();
871 ir_dereference
*deref
= ir
->lhs
->as_dereference();
875 ir_variable
*var
= ir
->lhs
->variable_referenced();
876 if (!var
|| !var
->is_in_buffer_block())
879 /* We have a write to a buffer variable, so declare a temporary and rewrite
880 * the assignment so that the temporary is the LHS.
882 mem_ctx
= ralloc_parent(shader
->ir
);
884 const glsl_type
*type
= rvalue
->type
;
885 ir_variable
*write_var
= new(mem_ctx
) ir_variable(type
,
888 base_ir
->insert_before(write_var
);
889 ir
->lhs
= new(mem_ctx
) ir_dereference_variable(write_var
);
891 /* Now we have to write the value assigned to the temporary back to memory */
892 write_to_memory(deref
, var
, write_var
, ir
->write_mask
);
897 is_buffer_backed_variable(ir_variable
*var
)
899 return var
->is_in_buffer_block() ||
900 var
->data
.mode
== ir_var_shader_shared
;
904 lower_ubo_reference_visitor::check_for_buffer_array_copy(ir_assignment
*ir
)
906 if (!ir
|| !ir
->lhs
|| !ir
->rhs
)
909 /* LHS and RHS must be arrays
910 * FIXME: arrays of arrays?
912 if (!ir
->lhs
->type
->is_array() || !ir
->rhs
->type
->is_array())
915 /* RHS must be a buffer-backed variable. This is what can cause the problem
916 * since it would lead to a series of loads that need to live until we
917 * see the writes to the LHS.
919 ir_variable
*rhs_var
= ir
->rhs
->variable_referenced();
920 if (!rhs_var
|| !is_buffer_backed_variable(rhs_var
))
923 /* Split the array copy into individual element copies to reduce
926 ir_dereference
*rhs_deref
= ir
->rhs
->as_dereference();
930 ir_dereference
*lhs_deref
= ir
->lhs
->as_dereference();
934 assert(lhs_deref
->type
->length
== rhs_deref
->type
->length
);
935 mem_ctx
= ralloc_parent(shader
->ir
);
937 for (unsigned i
= 0; i
< lhs_deref
->type
->length
; i
++) {
938 ir_dereference
*lhs_i
=
939 new(mem_ctx
) ir_dereference_array(lhs_deref
->clone(mem_ctx
, NULL
),
940 new(mem_ctx
) ir_constant(i
));
942 ir_dereference
*rhs_i
=
943 new(mem_ctx
) ir_dereference_array(rhs_deref
->clone(mem_ctx
, NULL
),
944 new(mem_ctx
) ir_constant(i
));
945 ir
->insert_after(assign(lhs_i
, rhs_i
));
954 lower_ubo_reference_visitor::check_for_buffer_struct_copy(ir_assignment
*ir
)
956 if (!ir
|| !ir
->lhs
|| !ir
->rhs
)
959 /* LHS and RHS must be records */
960 if (!ir
->lhs
->type
->is_record() || !ir
->rhs
->type
->is_record())
963 /* RHS must be a buffer-backed variable. This is what can cause the problem
964 * since it would lead to a series of loads that need to live until we
965 * see the writes to the LHS.
967 ir_variable
*rhs_var
= ir
->rhs
->variable_referenced();
968 if (!rhs_var
|| !is_buffer_backed_variable(rhs_var
))
971 /* Split the struct copy into individual element copies to reduce
974 ir_dereference
*rhs_deref
= ir
->rhs
->as_dereference();
978 ir_dereference
*lhs_deref
= ir
->lhs
->as_dereference();
982 assert(lhs_deref
->type
->record_compare(rhs_deref
->type
));
983 mem_ctx
= ralloc_parent(shader
->ir
);
985 for (unsigned i
= 0; i
< lhs_deref
->type
->length
; i
++) {
986 const char *field_name
= lhs_deref
->type
->fields
.structure
[i
].name
;
987 ir_dereference
*lhs_field
=
988 new(mem_ctx
) ir_dereference_record(lhs_deref
->clone(mem_ctx
, NULL
),
990 ir_dereference
*rhs_field
=
991 new(mem_ctx
) ir_dereference_record(rhs_deref
->clone(mem_ctx
, NULL
),
993 ir
->insert_after(assign(lhs_field
, rhs_field
));
1002 lower_ubo_reference_visitor::visit_enter(ir_assignment
*ir
)
1004 /* Array and struct copies could involve large amounts of load/store
1005 * operations. To improve register pressure we want to special-case
1006 * these and split them into individual element copies.
1007 * This way we avoid emitting all the loads for the RHS first and
1008 * all the writes for the LHS second and register usage is more
1011 if (check_for_buffer_array_copy(ir
))
1012 return visit_continue_with_parent
;
1014 if (check_for_buffer_struct_copy(ir
))
1015 return visit_continue_with_parent
;
1017 check_ssbo_unsized_array_length_assignment(ir
);
1018 check_for_ssbo_store(ir
);
1019 return rvalue_visit(ir
);
1022 /* Lowers the intrinsic call to a new internal intrinsic that swaps the
1023 * access to the buffer variable in the first parameter by an offset
1024 * and block index. This involves creating the new internal intrinsic
1025 * (i.e. the new function signature).
1028 lower_ubo_reference_visitor::lower_ssbo_atomic_intrinsic(ir_call
*ir
)
1030 /* SSBO atomics usually have 2 parameters, the buffer variable and an
1031 * integer argument. The exception is CompSwap, that has an additional
1032 * integer parameter.
1034 int param_count
= ir
->actual_parameters
.length();
1035 assert(param_count
== 2 || param_count
== 3);
1037 /* First argument must be a scalar integer buffer variable */
1038 exec_node
*param
= ir
->actual_parameters
.get_head();
1039 ir_instruction
*inst
= (ir_instruction
*) param
;
1040 assert(inst
->ir_type
== ir_type_dereference_variable
||
1041 inst
->ir_type
== ir_type_dereference_array
||
1042 inst
->ir_type
== ir_type_dereference_record
||
1043 inst
->ir_type
== ir_type_swizzle
);
1045 ir_rvalue
*deref
= (ir_rvalue
*) inst
;
1046 assert(deref
->type
->is_scalar() && deref
->type
->is_integer());
1048 ir_variable
*var
= deref
->variable_referenced();
1051 /* Compute the offset to the start if the dereference and the
1054 mem_ctx
= ralloc_parent(shader
->ir
);
1056 ir_rvalue
*offset
= NULL
;
1057 unsigned const_offset
;
1060 unsigned packing
= var
->get_interface_type()->interface_packing
;
1062 this->buffer_access_type
= ssbo_atomic_access
;
1064 setup_for_load_or_store(var
, deref
,
1065 &offset
, &const_offset
,
1066 &row_major
, &matrix_columns
,
1070 assert(matrix_columns
== 1);
1072 ir_rvalue
*deref_offset
=
1073 add(offset
, new(mem_ctx
) ir_constant(const_offset
));
1074 ir_rvalue
*block_index
= this->uniform_block
->clone(mem_ctx
, NULL
);
1076 /* Create the new internal function signature that will take a block
1077 * index and offset instead of a buffer variable
1079 exec_list sig_params
;
1080 ir_variable
*sig_param
= new(mem_ctx
)
1081 ir_variable(glsl_type::uint_type
, "block_ref" , ir_var_function_in
);
1082 sig_params
.push_tail(sig_param
);
1084 sig_param
= new(mem_ctx
)
1085 ir_variable(glsl_type::uint_type
, "offset" , ir_var_function_in
);
1086 sig_params
.push_tail(sig_param
);
1088 const glsl_type
*type
= deref
->type
->base_type
== GLSL_TYPE_INT
?
1089 glsl_type::int_type
: glsl_type::uint_type
;
1090 sig_param
= new(mem_ctx
)
1091 ir_variable(type
, "data1", ir_var_function_in
);
1092 sig_params
.push_tail(sig_param
);
1094 if (param_count
== 3) {
1095 sig_param
= new(mem_ctx
)
1096 ir_variable(type
, "data2", ir_var_function_in
);
1097 sig_params
.push_tail(sig_param
);
1100 ir_function_signature
*sig
=
1101 new(mem_ctx
) ir_function_signature(deref
->type
,
1102 shader_storage_buffer_object
);
1104 sig
->replace_parameters(&sig_params
);
1105 sig
->is_intrinsic
= true;
1108 sprintf(func_name
, "%s_internal", ir
->callee_name());
1109 ir_function
*f
= new(mem_ctx
) ir_function(func_name
);
1110 f
->add_signature(sig
);
1112 /* Now, create the call to the internal intrinsic */
1113 exec_list call_params
;
1114 call_params
.push_tail(block_index
);
1115 call_params
.push_tail(deref_offset
);
1116 param
= ir
->actual_parameters
.get_head()->get_next();
1117 ir_rvalue
*param_as_rvalue
= ((ir_instruction
*) param
)->as_rvalue();
1118 call_params
.push_tail(param_as_rvalue
->clone(mem_ctx
, NULL
));
1119 if (param_count
== 3) {
1120 param
= param
->get_next();
1121 param_as_rvalue
= ((ir_instruction
*) param
)->as_rvalue();
1122 call_params
.push_tail(param_as_rvalue
->clone(mem_ctx
, NULL
));
1124 ir_dereference_variable
*return_deref
=
1125 ir
->return_deref
->clone(mem_ctx
, NULL
);
1126 return new(mem_ctx
) ir_call(sig
, return_deref
, &call_params
);
1130 lower_ubo_reference_visitor::check_for_ssbo_atomic_intrinsic(ir_call
*ir
)
1132 const char *callee
= ir
->callee_name();
1133 if (!strcmp("__intrinsic_ssbo_atomic_add", callee
) ||
1134 !strcmp("__intrinsic_ssbo_atomic_min", callee
) ||
1135 !strcmp("__intrinsic_ssbo_atomic_max", callee
) ||
1136 !strcmp("__intrinsic_ssbo_atomic_and", callee
) ||
1137 !strcmp("__intrinsic_ssbo_atomic_or", callee
) ||
1138 !strcmp("__intrinsic_ssbo_atomic_xor", callee
) ||
1139 !strcmp("__intrinsic_ssbo_atomic_exchange", callee
) ||
1140 !strcmp("__intrinsic_ssbo_atomic_comp_swap", callee
)) {
1141 return lower_ssbo_atomic_intrinsic(ir
);
1149 lower_ubo_reference_visitor::visit_enter(ir_call
*ir
)
1151 ir_call
*new_ir
= check_for_ssbo_atomic_intrinsic(ir
);
1154 base_ir
->replace_with(new_ir
);
1155 return visit_continue_with_parent
;
1158 return rvalue_visit(ir
);
1162 } /* unnamed namespace */
1165 lower_ubo_reference(struct gl_shader
*shader
)
1167 lower_ubo_reference_visitor
v(shader
);
1169 /* Loop over the instructions lowering references, because we take
1170 * a deref of a UBO array using a UBO dereference as the index will
1171 * produce a collection of instructions all of which have cloned
1172 * UBO dereferences for that array index.
1176 visit_list_elements(&v
, shader
->ir
);
1177 } while (v
.progress
);