/*
 * Copyright © 2018 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "gl_nir_linker.h"
26 #include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
27 #include "linker_util.h"
28 #include "main/context.h"
29 #include "main/mtypes.h"
/* This file does the common linking of GLSL uniforms, using NIR, instead of IR
 * as its counter-part glsl/link_uniforms.cpp does.
34 * Also note that this is tailored for ARB_gl_spirv needs and particularities
35 * (like need to work/link without name available, explicit location for
 * normal uniforms as mandatory, and so on).
 */
39 #define UNMAPPED_UNIFORM_LOC ~0u
42 nir_setup_uniform_remap_tables(struct gl_context
*ctx
,
43 struct gl_shader_program
*prog
)
45 prog
->UniformRemapTable
= rzalloc_array(prog
,
46 struct gl_uniform_storage
*,
47 prog
->NumUniformRemapTable
);
48 union gl_constant_value
*data
=
49 rzalloc_array(prog
->data
,
50 union gl_constant_value
, prog
->data
->NumUniformDataSlots
);
51 if (!prog
->UniformRemapTable
|| !data
) {
52 linker_error(prog
, "Out of memory during linking.\n");
55 prog
->data
->UniformDataSlots
= data
;
57 prog
->data
->UniformDataDefaults
=
58 rzalloc_array(prog
->data
->UniformDataSlots
,
59 union gl_constant_value
, prog
->data
->NumUniformDataSlots
);
61 unsigned data_pos
= 0;
63 /* Reserve all the explicit locations of the active uniforms. */
64 for (unsigned i
= 0; i
< prog
->data
->NumUniformStorage
; i
++) {
65 struct gl_uniform_storage
*uniform
= &prog
->data
->UniformStorage
[i
];
67 if (prog
->data
->UniformStorage
[i
].remap_location
== UNMAPPED_UNIFORM_LOC
)
70 /* How many new entries for this uniform? */
71 const unsigned entries
= MAX2(1, uniform
->array_elements
);
72 unsigned num_slots
= glsl_get_component_slots(uniform
->type
);
74 uniform
->storage
= &data
[data_pos
];
76 /* Set remap table entries point to correct gl_uniform_storage. */
77 for (unsigned j
= 0; j
< entries
; j
++) {
78 unsigned element_loc
= uniform
->remap_location
+ j
;
79 prog
->UniformRemapTable
[element_loc
] = uniform
;
81 data_pos
+= num_slots
;
85 /* Reserve locations for rest of the uniforms. */
86 link_util_update_empty_uniform_locations(prog
);
88 for (unsigned i
= 0; i
< prog
->data
->NumUniformStorage
; i
++) {
89 struct gl_uniform_storage
*uniform
= &prog
->data
->UniformStorage
[i
];
91 if (uniform
->is_shader_storage
)
94 /* Built-in uniforms should not get any location. */
98 /* Explicit ones have been set already. */
99 if (uniform
->remap_location
!= UNMAPPED_UNIFORM_LOC
)
102 /* How many entries for this uniform? */
103 const unsigned entries
= MAX2(1, uniform
->array_elements
);
106 link_util_find_empty_block(prog
, &prog
->data
->UniformStorage
[i
]);
108 if (location
== -1 || location
+ entries
>= prog
->NumUniformRemapTable
) {
109 unsigned new_entries
= entries
;
111 location
= prog
->NumUniformRemapTable
;
113 new_entries
= location
- prog
->NumUniformRemapTable
+ entries
;
115 /* resize remap table to fit new entries */
116 prog
->UniformRemapTable
=
118 prog
->UniformRemapTable
,
119 struct gl_uniform_storage
*,
120 prog
->NumUniformRemapTable
+ new_entries
);
121 prog
->NumUniformRemapTable
+= new_entries
;
124 /* set the base location in remap table for the uniform */
125 uniform
->remap_location
= location
;
127 unsigned num_slots
= glsl_get_component_slots(uniform
->type
);
129 uniform
->storage
= &data
[data_pos
];
131 /* Set remap table entries point to correct gl_uniform_storage. */
132 for (unsigned j
= 0; j
< entries
; j
++) {
133 unsigned element_loc
= uniform
->remap_location
+ j
;
134 prog
->UniformRemapTable
[element_loc
] = uniform
;
136 data_pos
+= num_slots
;
142 mark_stage_as_active(struct gl_uniform_storage
*uniform
,
145 uniform
->active_shader_mask
|= 1 << stage
;
149 * Finds, returns, and updates the stage info for any uniform in UniformStorage
150 * defined by @var. In general this is done using the explicit location,
153 * * UBOs/SSBOs: as they lack explicit location, binding is used to locate
154 * them. That means that more that one entry at the uniform storage can be
155 * found. In that case all of them are updated, and the first entry is
156 * returned, in order to update the location of the nir variable.
158 * * Special uniforms: like atomic counters. They lack a explicit location,
159 * so they are skipped. They will be handled and assigned a location later.
162 static struct gl_uniform_storage
*
163 find_and_update_previous_uniform_storage(struct gl_shader_program
*prog
,
167 if (nir_variable_is_in_block(var
)) {
168 struct gl_uniform_storage
*uniform
= NULL
;
170 ASSERTED
unsigned num_blks
= nir_variable_is_in_ubo(var
) ?
171 prog
->data
->NumUniformBlocks
:
172 prog
->data
->NumShaderStorageBlocks
;
174 struct gl_uniform_block
*blks
= nir_variable_is_in_ubo(var
) ?
175 prog
->data
->UniformBlocks
: prog
->data
->ShaderStorageBlocks
;
177 for (unsigned i
= 0; i
< prog
->data
->NumUniformStorage
; i
++) {
178 /* UniformStorage contains both variables from ubos and ssbos */
179 if ( prog
->data
->UniformStorage
[i
].is_shader_storage
!=
180 nir_variable_is_in_ssbo(var
))
183 int block_index
= prog
->data
->UniformStorage
[i
].block_index
;
184 if (block_index
!= -1) {
185 assert(block_index
< num_blks
);
187 if (var
->data
.binding
== blks
[block_index
].Binding
) {
189 uniform
= &prog
->data
->UniformStorage
[i
];
190 mark_stage_as_active(&prog
->data
->UniformStorage
[i
],
199 /* Beyond blocks, there are still some corner cases of uniforms without
200 * location (ie: atomic counters) that would have a initial location equal
201 * to -1. We just return on that case. Those uniforms will be handled
204 if (var
->data
.location
== -1)
207 /* TODO: following search can be problematic with shaders with a lot of
208 * uniforms. Would it be better to use some type of hash
210 for (unsigned i
= 0; i
< prog
->data
->NumUniformStorage
; i
++) {
211 if (prog
->data
->UniformStorage
[i
].remap_location
== var
->data
.location
) {
212 mark_stage_as_active(&prog
->data
->UniformStorage
[i
], stage
);
214 return &prog
->data
->UniformStorage
[i
];
221 /* Used to build a tree representing the glsl_type so that we can have a place
222 * to store the next index for opaque types. Array types are expanded so that
223 * they have a single child which is used for all elements of the array.
224 * Struct types have a child for each member. The tree is walked while
225 * processing a uniform so that we can recognise when an opaque type is
226 * encountered a second time in order to reuse the same range of indices that
227 * was reserved the first time. That way the sampler indices can be arranged
228 * so that members of an array are placed sequentially even if the array is an
229 * array of structs containing other opaque members.
231 struct type_tree_entry
{
232 /* For opaque types, this will be the next index to use. If we haven’t
233 * encountered this member yet, it will be UINT_MAX.
237 struct type_tree_entry
*parent
;
238 struct type_tree_entry
*next_sibling
;
239 struct type_tree_entry
*children
;
242 struct nir_link_uniforms_state
{
243 /* per-whole program */
244 unsigned num_hidden_uniforms
;
246 unsigned max_uniform_location
;
247 unsigned next_sampler_index
;
249 /* per-shader stage */
250 unsigned next_image_index
;
251 unsigned num_shader_samplers
;
252 unsigned num_shader_images
;
253 unsigned num_shader_uniform_components
;
254 unsigned shader_samplers_used
;
255 unsigned shader_shadow_samplers
;
256 struct gl_program_parameter_list
*params
;
259 nir_variable
*current_var
;
261 bool var_is_in_block
;
262 int top_level_array_size
;
263 int top_level_array_stride
;
265 struct type_tree_entry
*current_type
;
268 static struct type_tree_entry
*
269 build_type_tree_for_type(const struct glsl_type
*type
)
271 struct type_tree_entry
*entry
= malloc(sizeof *entry
);
273 entry
->array_size
= 1;
274 entry
->next_index
= UINT_MAX
;
275 entry
->children
= NULL
;
276 entry
->next_sibling
= NULL
;
277 entry
->parent
= NULL
;
279 if (glsl_type_is_array(type
)) {
280 entry
->array_size
= glsl_get_length(type
);
281 entry
->children
= build_type_tree_for_type(glsl_get_array_element(type
));
282 entry
->children
->parent
= entry
;
283 } else if (glsl_type_is_struct_or_ifc(type
)) {
284 struct type_tree_entry
*last
= NULL
;
286 for (unsigned i
= 0; i
< glsl_get_length(type
); i
++) {
287 const struct glsl_type
*field_type
= glsl_get_struct_field(type
, i
);
288 struct type_tree_entry
*field_entry
=
289 build_type_tree_for_type(field_type
);
292 entry
->children
= field_entry
;
294 last
->next_sibling
= field_entry
;
296 field_entry
->parent
= entry
;
306 free_type_tree(struct type_tree_entry
*entry
)
308 struct type_tree_entry
*p
, *next
;
310 for (p
= entry
->children
; p
; p
= next
) {
311 next
= p
->next_sibling
;
319 get_next_index(struct nir_link_uniforms_state
*state
,
320 const struct gl_uniform_storage
*uniform
,
321 unsigned *next_index
)
323 /* If we’ve already calculated an index for this member then we can just
326 if (state
->current_type
->next_index
== UINT_MAX
) {
327 /* Otherwise we need to reserve enough indices for all of the arrays
328 * enclosing this member.
331 unsigned array_size
= 1;
333 for (const struct type_tree_entry
*p
= state
->current_type
;
336 array_size
*= p
->array_size
;
339 state
->current_type
->next_index
= *next_index
;
340 *next_index
+= array_size
;
343 unsigned index
= state
->current_type
->next_index
;
345 state
->current_type
->next_index
+= MAX2(1, uniform
->array_elements
);
351 add_parameter(struct gl_uniform_storage
*uniform
,
352 struct gl_context
*ctx
,
353 struct gl_shader_program
*prog
,
354 const struct glsl_type
*type
,
355 struct nir_link_uniforms_state
*state
)
357 if (!state
->params
|| uniform
->is_shader_storage
|| glsl_contains_opaque(type
))
360 unsigned num_params
= glsl_get_aoa_size(type
);
361 num_params
= MAX2(num_params
, 1);
362 num_params
*= glsl_get_matrix_columns(glsl_without_array(type
));
364 bool is_dual_slot
= glsl_type_is_dual_slot(glsl_without_array(type
));
368 struct gl_program_parameter_list
*params
= state
->params
;
369 int base_index
= params
->NumParameters
;
370 _mesa_reserve_parameter_storage(params
, num_params
);
372 if (ctx
->Const
.PackedDriverUniformStorage
) {
373 for (unsigned i
= 0; i
< num_params
; i
++) {
374 unsigned dmul
= glsl_type_is_64bit(glsl_without_array(type
)) ? 2 : 1;
375 unsigned comps
= glsl_get_vector_elements(glsl_without_array(type
)) * dmul
;
383 _mesa_add_parameter(params
, PROGRAM_UNIFORM
, NULL
, comps
,
384 glsl_get_gl_type(type
), NULL
, NULL
, false);
387 for (unsigned i
= 0; i
< num_params
; i
++) {
388 _mesa_add_parameter(params
, PROGRAM_UNIFORM
, NULL
, 4,
389 glsl_get_gl_type(type
), NULL
, NULL
, true);
393 /* Each Parameter will hold the index to the backing uniform storage.
394 * This avoids relying on names to match parameters and uniform
397 for (unsigned i
= 0; i
< num_params
; i
++) {
398 struct gl_program_parameter
*param
= ¶ms
->Parameters
[base_index
+ i
];
399 param
->UniformStorageIndex
= uniform
- prog
->data
->UniformStorage
;
400 param
->MainUniformStorageIndex
= state
->current_var
->data
.location
;
405 * Creates the neccessary entries in UniformStorage for the uniform. Returns
406 * the number of locations used or -1 on failure.
409 nir_link_uniform(struct gl_context
*ctx
,
410 struct gl_shader_program
*prog
,
411 struct gl_program
*stage_program
,
412 gl_shader_stage stage
,
413 const struct glsl_type
*type
,
414 const struct glsl_type
*parent_type
,
415 unsigned index_in_parent
,
417 struct nir_link_uniforms_state
*state
)
419 struct gl_uniform_storage
*uniform
= NULL
;
421 if (parent_type
== state
->current_var
->type
&&
422 nir_variable_is_in_ssbo(state
->current_var
)) {
423 /* Type is the top level SSBO member */
424 if (glsl_type_is_array(type
) &&
425 (glsl_type_is_array(glsl_get_array_element(type
)) ||
426 glsl_type_is_struct_or_ifc(glsl_get_array_element(type
)))) {
427 /* Type is a top-level array (array of aggregate types) */
428 state
->top_level_array_size
= glsl_get_length(type
);
429 state
->top_level_array_stride
= glsl_get_explicit_stride(type
);
431 state
->top_level_array_size
= 1;
432 state
->top_level_array_stride
= 0;
436 /* gl_uniform_storage can cope with one level of array, so if the type is a
437 * composite type or an array where each element occupies more than one
438 * location than we need to recursively process it.
440 if (glsl_type_is_struct_or_ifc(type
) ||
441 (glsl_type_is_array(type
) &&
442 (glsl_type_is_array(glsl_get_array_element(type
)) ||
443 glsl_type_is_struct_or_ifc(glsl_get_array_element(type
))))) {
444 int location_count
= 0;
445 struct type_tree_entry
*old_type
= state
->current_type
;
446 unsigned int struct_base_offset
= state
->offset
;
448 state
->current_type
= old_type
->children
;
450 for (unsigned i
= 0; i
< glsl_get_length(type
); i
++) {
451 const struct glsl_type
*field_type
;
453 if (glsl_type_is_struct_or_ifc(type
)) {
454 field_type
= glsl_get_struct_field(type
, i
);
455 /* Use the offset inside the struct only for variables backed by
456 * a buffer object. For variables not backed by a buffer object,
459 if (state
->var_is_in_block
) {
461 struct_base_offset
+ glsl_get_struct_field_offset(type
, i
);
464 field_type
= glsl_get_array_element(type
);
467 int entries
= nir_link_uniform(ctx
, prog
, stage_program
, stage
,
468 field_type
, type
, i
, location
,
475 location_count
+= entries
;
477 if (glsl_type_is_struct_or_ifc(type
))
478 state
->current_type
= state
->current_type
->next_sibling
;
481 state
->current_type
= old_type
;
483 return location_count
;
485 /* Create a new uniform storage entry */
486 prog
->data
->UniformStorage
=
488 prog
->data
->UniformStorage
,
489 struct gl_uniform_storage
,
490 prog
->data
->NumUniformStorage
+ 1);
491 if (!prog
->data
->UniformStorage
) {
492 linker_error(prog
, "Out of memory during linking.\n");
496 uniform
= &prog
->data
->UniformStorage
[prog
->data
->NumUniformStorage
];
497 prog
->data
->NumUniformStorage
++;
499 /* Initialize its members */
500 memset(uniform
, 0x00, sizeof(struct gl_uniform_storage
));
501 /* ARB_gl_spirv: names are considered optional debug info, so the linker
502 * needs to work without them, and returning them is optional. For
503 * simplicity we ignore names.
505 uniform
->name
= NULL
;
507 const struct glsl_type
*type_no_array
= glsl_without_array(type
);
508 if (glsl_type_is_array(type
)) {
509 uniform
->type
= type_no_array
;
510 uniform
->array_elements
= glsl_get_length(type
);
512 uniform
->type
= type
;
513 uniform
->array_elements
= 0;
515 uniform
->top_level_array_size
= state
->top_level_array_size
;
516 uniform
->top_level_array_stride
= state
->top_level_array_stride
;
518 uniform
->active_shader_mask
|= 1 << stage
;
521 /* Uniform has an explicit location */
522 uniform
->remap_location
= location
;
524 uniform
->remap_location
= UNMAPPED_UNIFORM_LOC
;
527 uniform
->hidden
= state
->current_var
->data
.how_declared
== nir_var_hidden
;
529 state
->num_hidden_uniforms
++;
531 uniform
->is_shader_storage
= nir_variable_is_in_ssbo(state
->current_var
);
533 /* Set fields whose default value depend on the variable being inside a
536 * From the OpenGL 4.6 spec, 7.3 Program objects:
538 * "For the property ARRAY_STRIDE, ... For active variables not declared
539 * as an array of basic types, zero is written to params. For active
540 * variables not backed by a buffer object, -1 is written to params,
541 * regardless of the variable type."
543 * "For the property MATRIX_STRIDE, ... For active variables not declared
544 * as a matrix or array of matrices, zero is written to params. For active
545 * variables not backed by a buffer object, -1 is written to params,
546 * regardless of the variable type."
548 * For the property IS_ROW_MAJOR, ... For active variables backed by a
549 * buffer object, declared as a single matrix or array of matrices, and
550 * stored in row-major order, one is written to params. For all other
551 * active variables, zero is written to params.
553 uniform
->array_stride
= -1;
554 uniform
->matrix_stride
= -1;
555 uniform
->row_major
= false;
557 if (state
->var_is_in_block
) {
558 uniform
->array_stride
= glsl_type_is_array(type
) ?
559 glsl_get_explicit_stride(type
) : 0;
561 if (glsl_type_is_matrix(uniform
->type
)) {
562 uniform
->matrix_stride
= glsl_get_explicit_stride(uniform
->type
);
563 uniform
->row_major
= glsl_matrix_type_is_row_major(uniform
->type
);
565 uniform
->matrix_stride
= 0;
569 uniform
->offset
= state
->var_is_in_block
? state
->offset
: -1;
571 int buffer_block_index
= -1;
572 /* If the uniform is inside a uniform block determine its block index by
573 * comparing the bindings, we can not use names.
575 if (state
->var_is_in_block
) {
576 struct gl_uniform_block
*blocks
= nir_variable_is_in_ssbo(state
->current_var
) ?
577 prog
->data
->ShaderStorageBlocks
: prog
->data
->UniformBlocks
;
579 int num_blocks
= nir_variable_is_in_ssbo(state
->current_var
) ?
580 prog
->data
->NumShaderStorageBlocks
: prog
->data
->NumUniformBlocks
;
582 for (unsigned i
= 0; i
< num_blocks
; i
++) {
583 if (state
->current_var
->data
.binding
== blocks
[i
].Binding
) {
584 buffer_block_index
= i
;
588 assert(buffer_block_index
>= 0);
590 /* Compute the next offset. */
591 state
->offset
+= glsl_get_explicit_size(type
, true);
594 uniform
->block_index
= buffer_block_index
;
596 /* @FIXME: the initialization of the following will be done as we
597 * implement support for their specific features, like SSBO, atomics,
600 uniform
->builtin
= false;
601 uniform
->atomic_buffer_index
= -1;
602 uniform
->is_bindless
= false;
604 /* The following are not for features not supported by ARB_gl_spirv */
605 uniform
->num_compatible_subroutines
= 0;
607 unsigned entries
= MAX2(1, uniform
->array_elements
);
608 unsigned values
= glsl_get_component_slots(type
);
610 if (glsl_type_is_sampler(type_no_array
)) {
612 get_next_index(state
, uniform
, &state
->next_sampler_index
);
614 /* Samplers (bound or bindless) are counted as two components as
615 * specified by ARB_bindless_texture.
617 state
->num_shader_samplers
+= values
/ 2;
619 uniform
->opaque
[stage
].active
= true;
620 uniform
->opaque
[stage
].index
= sampler_index
;
622 const unsigned shadow
= glsl_sampler_type_is_shadow(type_no_array
);
624 for (unsigned i
= sampler_index
;
625 i
< MIN2(state
->next_sampler_index
, MAX_SAMPLERS
);
627 stage_program
->sh
.SamplerTargets
[i
] =
628 glsl_get_sampler_target(type_no_array
);
629 state
->shader_samplers_used
|= 1U << i
;
630 state
->shader_shadow_samplers
|= shadow
<< i
;
633 state
->num_values
+= values
;
634 } else if (glsl_type_is_image(type_no_array
)) {
635 /* @FIXME: image_index should match that of the same image
636 * uniform in other shaders. This means we need to match image
637 * uniforms by location (GLSL does it by variable name, but we
638 * want to avoid that).
640 int image_index
= state
->next_image_index
;
641 state
->next_image_index
+= entries
;
643 /* Images (bound or bindless) are counted as two components as
644 * specified by ARB_bindless_texture.
646 state
->num_shader_images
+= values
/ 2;
648 uniform
->opaque
[stage
].active
= true;
649 uniform
->opaque
[stage
].index
= image_index
;
651 /* Set image access qualifiers */
652 enum gl_access_qualifier image_access
=
653 state
->current_var
->data
.access
;
654 const GLenum access
=
655 (image_access
& ACCESS_NON_WRITEABLE
) ?
656 ((image_access
& ACCESS_NON_READABLE
) ? GL_NONE
:
658 ((image_access
& ACCESS_NON_READABLE
) ? GL_WRITE_ONLY
:
660 for (unsigned i
= image_index
;
661 i
< MIN2(state
->next_image_index
, MAX_IMAGE_UNIFORMS
);
663 stage_program
->sh
.ImageAccess
[i
] = access
;
666 if (!uniform
->is_shader_storage
) {
667 state
->num_shader_uniform_components
+= values
;
668 state
->num_values
+= values
;
671 if (!state
->var_is_in_block
) {
672 state
->num_shader_uniform_components
+= values
;
673 state
->num_values
+= values
;
677 if (uniform
->remap_location
!= UNMAPPED_UNIFORM_LOC
&&
678 state
->max_uniform_location
< uniform
->remap_location
+ entries
)
679 state
->max_uniform_location
= uniform
->remap_location
+ entries
;
681 if (!state
->var_is_in_block
)
682 add_parameter(uniform
, ctx
, prog
, type
, state
);
684 return MAX2(uniform
->array_elements
, 1);
689 gl_nir_link_uniforms(struct gl_context
*ctx
,
690 struct gl_shader_program
*prog
,
691 bool fill_parameters
)
693 /* First free up any previous UniformStorage items */
694 ralloc_free(prog
->data
->UniformStorage
);
695 prog
->data
->UniformStorage
= NULL
;
696 prog
->data
->NumUniformStorage
= 0;
698 /* Iterate through all linked shaders */
699 struct nir_link_uniforms_state state
= {0,};
701 for (unsigned shader_type
= 0; shader_type
< MESA_SHADER_STAGES
; shader_type
++) {
702 struct gl_linked_shader
*sh
= prog
->_LinkedShaders
[shader_type
];
706 nir_shader
*nir
= sh
->Program
->nir
;
709 state
.next_image_index
= 0;
710 state
.num_shader_samplers
= 0;
711 state
.num_shader_images
= 0;
712 state
.num_shader_uniform_components
= 0;
713 state
.shader_samplers_used
= 0;
714 state
.shader_shadow_samplers
= 0;
715 state
.params
= fill_parameters
? sh
->Program
->Parameters
: NULL
;
717 nir_foreach_variable(var
, &nir
->uniforms
) {
718 struct gl_uniform_storage
*uniform
= NULL
;
720 state
.current_var
= var
;
722 /* Check if the uniform has been processed already for
723 * other stage. If so, validate they are compatible and update
724 * the active stage mask.
726 uniform
= find_and_update_previous_uniform_storage(prog
, var
, shader_type
);
728 var
->data
.location
= uniform
- prog
->data
->UniformStorage
;
730 if (!state
.var_is_in_block
)
731 add_parameter(uniform
, ctx
, prog
, var
->type
, &state
);
736 int location
= var
->data
.location
;
737 /* From now on the variable’s location will be its uniform index */
738 var
->data
.location
= prog
->data
->NumUniformStorage
;
741 state
.var_is_in_block
= nir_variable_is_in_block(var
);
742 state
.top_level_array_size
= 0;
743 state
.top_level_array_stride
= 0;
746 * From ARB_program_interface spec, issue (16):
748 * "RESOLVED: We will follow the default rule for enumerating block
749 * members in the OpenGL API, which is:
751 * * If a variable is a member of an interface block without an
752 * instance name, it is enumerated using just the variable name.
754 * * If a variable is a member of an interface block with an
755 * instance name, it is enumerated as "BlockName.Member", where
756 * "BlockName" is the name of the interface block (not the
757 * instance name) and "Member" is the name of the variable.
759 * For example, in the following code:
769 * } instance3[2]; // uses two separate buffer bindings
771 * the three uniforms (if active) are enumerated as "member1",
772 * "Block2.member2", and "Block3.member3"."
774 * Note that in the last example, with an array of ubo, only one
775 * uniform is generated. For that reason, while unrolling the
776 * uniforms of a ubo, or the variables of a ssbo, we need to treat
777 * arrays of instance as a single block.
779 const struct glsl_type
*type
= var
->type
;
780 if (state
.var_is_in_block
&& glsl_type_is_array(type
)) {
781 type
= glsl_without_array(type
);
784 struct type_tree_entry
*type_tree
=
785 build_type_tree_for_type(type
);
786 state
.current_type
= type_tree
;
788 int res
= nir_link_uniform(ctx
, prog
, sh
->Program
, shader_type
, type
,
793 free_type_tree(type_tree
);
799 sh
->Program
->SamplersUsed
= state
.shader_samplers_used
;
800 sh
->shadow_samplers
= state
.shader_shadow_samplers
;
801 sh
->Program
->info
.num_textures
= state
.num_shader_samplers
;
802 sh
->Program
->info
.num_images
= state
.num_shader_images
;
803 sh
->num_uniform_components
= state
.num_shader_uniform_components
;
804 sh
->num_combined_uniform_components
= sh
->num_uniform_components
;
807 prog
->data
->NumHiddenUniforms
= state
.num_hidden_uniforms
;
808 prog
->NumUniformRemapTable
= state
.max_uniform_location
;
809 prog
->data
->NumUniformDataSlots
= state
.num_values
;
811 nir_setup_uniform_remap_tables(ctx
, prog
);
812 gl_nir_set_uniform_initializers(ctx
, prog
);