/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_deref.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "main/context.h"
#include "main/mtypes.h"
/*
 * This file does the common linking of GLSL uniforms, using NIR instead of
 * IR, as the counterpart of glsl/link_uniforms.cpp.
 */
#define UNMAPPED_UNIFORM_LOC ~0u

/**
 * Built-in / reserved GL variable names start with "gl_"
 */
static inline bool
is_gl_identifier(const char *s)
{
   return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
}
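/* Build the remap tables that translate a uniform location (as used by
 * glUniform*() and glGetUniformLocation()) back to a gl_uniform_storage
 * entry, and allocate the backing constant storage for every uniform.
 * Explicit layout(location=...) uniforms are reserved first; the remaining
 * active uniforms are then packed into the holes or appended at the end.
 */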
static void
nir_setup_uniform_remap_tables(struct gl_context *ctx,
                               struct gl_shader_program *prog)
{
   unsigned total_entries = prog->NumExplicitUniformLocations;

   /* For glsl this may have been allocated by reserve_explicit_locations() so
    * that we can keep track of unused uniforms with explicit locations.
    */
   assert(!prog->data->spirv ||
          (prog->data->spirv && !prog->UniformRemapTable));
   if (!prog->UniformRemapTable) {
      prog->UniformRemapTable = rzalloc_array(prog,
                                              struct gl_uniform_storage *,
                                              prog->NumUniformRemapTable);
   }

   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;
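   /* A uniform occupying N remap entries with S component slots per element
    * advances data_pos by N * S slots in total. For example, "vec4 u[3]"
    * with layout(location = 2) fills remap entries 2..4 and consumes
    * 3 * 4 = 12 gl_constant_value slots.
    */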
   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }
   /* Reserve locations for the rest of the uniforms. */
   if (prog->data->spirv)
      link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* Add new entries to the total amount for checking against
       * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform
       * block (-1), because locations of uniform block entries are not
       * assignable.
       */
      if (prog->data->UniformStorage[i].block_index == -1)
         total_entries += entries;

      int location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* resize remap table to fit new entries */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* set the base location in remap table for the uniform */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      if (uniform->block_index == -1)
         uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         if (uniform->block_index == -1)
            data_pos += num_slots;
      }
   }

   /* Verify that the total amount of entries for explicit and implicit
    * locations is less than MAX_UNIFORM_LOCATIONS.
    */
   if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
      linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
                   " (%u > %u)", total_entries,
                   ctx->Const.MaxUserAssignableUniformLocations);
   }
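   /* Subroutine uniforms are handled separately below: their indices live in
    * a per-stage table (sh.SubroutineUniformRemapTable) rather than in the
    * program-wide UniformRemapTable.
    */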
   /* Reserve all the explicit locations of the active subroutine uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         /* Set remap table entries to point to the correct
          * gl_uniform_storage.
          */
         for (unsigned k = 0; k < entries; k++) {
            unsigned element_loc =
               prog->data->UniformStorage[i].remap_location + k;
            p->sh.SubroutineUniformRemapTable[element_loc] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
      }
   }
   /* reserve subroutine locations */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location !=
          UNMAPPED_UNIFORM_LOC)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         p->sh.SubroutineUniformRemapTable =
            reralloc(p,
                     p->sh.SubroutineUniformRemapTable,
                     struct gl_uniform_storage *,
                     p->sh.NumSubroutineUniformRemapTable + entries);

         for (unsigned k = 0; k < entries; k++) {
            p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
         prog->data->UniformStorage[i].remap_location =
            p->sh.NumSubroutineUniformRemapTable;
         p->sh.NumSubroutineUniformRemapTable += entries;
      }
   }
}
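/* Track which array elements of a uniform/UBO/SSBO variable are actually
 * dereferenced, so unused elements can be recognised later. Each array step
 * of the deref chain is recorded in an array_deref_range; a dynamic
 * (non-constant) index is stored as dr->index == dr->size, which marks every
 * element of that dimension as potentially accessed.
 */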
static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       deref->mode & ~(nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix dereferences */
         if (!glsl_type_is_array(deref_type))
            break;

         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO. We can't
             * track accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);

   /** Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *bits = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      bits = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));
   }

   if (entry)
      bits = (BITSET_WORD *) entry->data;

   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs,
                                               array_depth, bits);
   }

   assert(deref->mode == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var, bits);
}
/* Iterate over the shader and collect information about uniform use */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type == nir_instr_type_intrinsic) {
                  nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                  switch (intr->intrinsic) {
                  case nir_intrinsic_atomic_counter_read_deref:
                  case nir_intrinsic_atomic_counter_inc_deref:
                  case nir_intrinsic_atomic_counter_pre_dec_deref:
                  case nir_intrinsic_atomic_counter_post_dec_deref:
                  case nir_intrinsic_atomic_counter_add_deref:
                  case nir_intrinsic_atomic_counter_min_deref:
                  case nir_intrinsic_atomic_counter_max_deref:
                  case nir_intrinsic_atomic_counter_and_deref:
                  case nir_intrinsic_atomic_counter_or_deref:
                  case nir_intrinsic_atomic_counter_xor_deref:
                  case nir_intrinsic_atomic_counter_exchange_deref:
                  case nir_intrinsic_atomic_counter_comp_swap_deref:
                  case nir_intrinsic_image_deref_load:
                  case nir_intrinsic_image_deref_store:
                  case nir_intrinsic_image_deref_atomic_add:
                  case nir_intrinsic_image_deref_atomic_umin:
                  case nir_intrinsic_image_deref_atomic_imin:
                  case nir_intrinsic_image_deref_atomic_umax:
                  case nir_intrinsic_image_deref_atomic_imax:
                  case nir_intrinsic_image_deref_atomic_and:
                  case nir_intrinsic_image_deref_atomic_or:
                  case nir_intrinsic_image_deref_atomic_xor:
                  case nir_intrinsic_image_deref_atomic_exchange:
                  case nir_intrinsic_image_deref_atomic_comp_swap:
                  case nir_intrinsic_image_deref_size:
                  case nir_intrinsic_image_deref_samples:
                  case nir_intrinsic_load_deref:
                  case nir_intrinsic_store_deref:
                     add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                       &derefs, &derefs_size);
                     break;

                  default:
                     /* Nothing to do */
                     break;
                  }
               } else if (instr->type == nir_instr_type_tex) {
                  nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
                  int sampler_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_sampler_deref);
                  int texture_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_texture_deref);

                  if (sampler_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[sampler_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }

                  if (texture_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[texture_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}
static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}
/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
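/* For example (illustrative only): "struct { sampler2D s; float f; } u[4]"
 * yields a root entry with array_size 4 whose single child represents the
 * struct, which in turn has two children, one per member.
 */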
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};
struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;
   unsigned next_subroutine;

   /* per-shader stage */
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
   struct hash_table *referenced_uniforms;
   struct hash_table *uniform_hash;
};
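/* Mirror a uniform into the stage's gl_program_parameter_list so drivers can
 * upload it. With PackedDriverUniformStorage each parameter holds only the
 * components the type really uses; otherwise every parameter is padded out
 * to a vec4.
 */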
static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage ||
       glsl_contains_opaque(type))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}
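/* Return the next free opaque (e.g. sampler) index for the member currently
 * being processed, reserving a contiguous range for all enclosing arrays the
 * first time the member is seen. *initialised reports whether this call did
 * the reservation.
 */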
static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   /* If we’ve already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent)
         array_size *= p->array_size;

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else
      *initialised = false;

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}
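/* GLSL path: walk the (possibly aggregate) type, rebuilding each leaf
 * uniform's name, and look it up in state->uniform_hash. If every leaf was
 * already created while processing a previous stage, update its per-stage
 * data and report a match so the variable is not processed again.
 */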
static bool
find_and_update_named_uniform_storage(struct gl_context *ctx,
                                      struct gl_shader_program *prog,
                                      struct nir_link_uniforms_state *state,
                                      nir_variable *var, char **name,
                                      size_t name_length,
                                      const struct glsl_type *type,
                                      unsigned stage, bool *first_element)
{
   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {

      struct type_tree_entry *old_type = state->current_type;
      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      bool result = false;
      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         result = find_and_update_named_uniform_storage(ctx, prog, state,
                                                        var, name, new_length,
                                                        field_type, stage,
                                                        first_element);

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;

         if (!result) {
            state->current_type = old_type;
            return false;
         }
      }

      state->current_type = old_type;

      return result;
   }

   struct hash_entry *entry =
      _mesa_hash_table_search(state->uniform_hash, *name);
   if (entry) {
      unsigned i = (unsigned) (intptr_t) entry->data;
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (*first_element && !state->var_is_in_block) {
         *first_element = false;
         var->data.location = uniform - prog->data->UniformStorage;
      }

      unsigned values = glsl_get_component_slots(type);
      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_sampler(type_no_array)) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
         bool init_idx;
         unsigned sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index,
                           &init_idx);

         /* Samplers (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         if (init_idx) {
            const unsigned shadow =
               glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
                 i++) {
               sh->Program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      } else if (glsl_type_is_image(type_no_array)) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
         int image_index = state->next_image_index;
         /* TODO: handle structs when bindless support is added */
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers */
         enum gl_access_qualifier image_access =
            state->current_var->data.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            sh->Program->sh.ImageAccess[i] = access;
         }
      }

      struct hash_entry *entry =
         _mesa_hash_table_search(state->referenced_uniforms,
                                 state->current_var);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
         uniform->active_shader_mask |= 1 << stage;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      return true;
   }

   return false;
}
/**
 * Finds, returns, and updates the stage info for any uniform in UniformStorage
 * defined by @var. For GLSL this is done using the name, for SPIR-V this is
 * in general done using the explicit location, except:
 *
 * * UBOs/SSBOs: as they lack explicit location, binding is used to locate
 *   them. That means that more than one entry at the uniform storage can be
 *   found. In that case all of them are updated, and the first entry is
 *   returned, in order to update the location of the nir variable.
 *
 * * Special uniforms: like atomic counters. They lack an explicit location,
 *   so they are skipped. They will be handled and assigned a location later.
 */
static bool
find_and_update_previous_uniform_storage(struct gl_context *ctx,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(ctx, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
            }
         }
      }

      if (uniform) {
         var->data.location = uniform - prog->data->UniformStorage;
         return true;
      }

      return false;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without
    * location (ie: atomic counters) that would have an initial location equal
    * to -1. We just return on that case. Those uniforms will be handled
    * later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms. Would it be better to use some type of hash?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, ctx, prog, var->type, state);
         return true;
      }
   }

   return false;
}
static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;
         last = field_entry;
      }
   }

   return entry;
}
static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}
static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}
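/* enter_record()/leave_record() align state->offset to the struct's base
 * alignment when stepping into and out of a struct inside a buffer block.
 * Under std140 a struct's base alignment is rounded up to that of a vec4
 * (16 bytes); std430 drops that rounding, so e.g. a struct holding a single
 * float is 16-byte aligned under std140 but only 4-byte aligned under
 * std430.
 */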
static void
enter_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}
static void
leave_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}
/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state,
                 char **name, size_t name_length, bool row_major)
{
   struct gl_uniform_storage *uniform = NULL;

   if (state->set_top_level_array &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types) */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }

      state->set_top_level_array = false;
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         enter_record(state, ctx, type, row_major);

      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;
         bool field_row_major = row_major;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               if (prog->data->spirv) {
                  state->offset =
                     struct_base_offset + glsl_get_struct_field_offset(type, i);
               } else if (glsl_get_struct_field_offset(type, i) != -1 &&
                          type == state->current_ifc_type) {
                  state->offset = glsl_get_struct_field_offset(type, i);
               }

               if (glsl_type_is_interface(type))
                  state->set_top_level_array = true;
            }

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }

            /* The layout of structures at the top level of the block is set
             * during parsing. For matrices contained in multiple levels of
             * structures in the block, the inner structures have no layout.
             * These cases must potentially inherit the layout from the outer
             * levels.
             */
            const enum glsl_matrix_layout matrix_layout =
               glsl_get_struct_field_data(type, i)->matrix_layout;
            if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
               field_row_major = true;
            } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
               field_row_major = false;
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, i, location,
                                        state, name, new_length,
                                        field_row_major);
         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         leave_record(state, ctx, type, row_major);

      state->current_type = old_type;

      return location_count;
   } else {
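      /* Leaf case: from here on "type" is a basic type or an array of basic
       * types, which a single gl_uniform_storage entry can represent.
       */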
      /* Create a new uniform storage entry */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));

      uniform->name =
         name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      struct hash_entry *entry =
         _mesa_hash_table_search(state->referenced_uniforms,
                                 state->current_var);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
         uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);

      /* Set fields whose default value depends on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       * as an array of basic types, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not
       * declared as a matrix or array of matrices, zero is written to
       * params. For active variables not backed by a buffer object, -1 is
       * written to params, regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by a
       * buffer object, declared as a single matrix or array of matrices, and
       * stored in row-major order, one is written to params. For all other
       * active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }

         if (!prog->data->spirv) {
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);

            unsigned alignment =
               glsl_get_std140_base_alignment(type, uniform->row_major);
            if (packing == GLSL_INTERFACE_PACKING_STD430) {
               alignment =
                  glsl_get_std430_base_alignment(type, uniform->row_major);
            }
            state->offset = glsl_align(state->offset, alignment);
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index by
       * comparing the bindings; we can not use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         if (!prog->data->spirv) {
            bool is_interface_array =
               glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
               glsl_type_is_array(state->current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state->current_var->interface_type);
            if (is_interface_array) {
               unsigned l = strlen(ifc_name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     buffer_block_index = i;
                     break;
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;
                     break;
                  }
               }
            }

            /* Compute the next offset. */
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);
            if (packing == GLSL_INTERFACE_PACKING_STD430)
               state->offset += glsl_get_std430_size(type, uniform->row_major);
            else
               state->offset += glsl_get_std140_size(type, uniform->row_major);
         } else {
            for (unsigned i = 0; i < num_blocks; i++) {
               if (state->current_var->data.binding == blocks[i].Binding) {
                  buffer_block_index = i;
                  break;
               }
            }

            /* Compute the next offset. */
            state->offset += glsl_get_explicit_size(type, true);
         }

         assert(buffer_block_index >= 0);
      }

      uniform->block_index = buffer_block_index;

      /* @FIXME: the initialization of the following will be done as we
       * implement support for their specific features, like SSBO, atomics,
       * etc.
       */
      uniform->builtin = is_gl_identifier(uniform->name);
      uniform->atomic_buffer_index = -1;
      uniform->is_bindless = false;

      /* The following is for features not supported by ARB_gl_spirv */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      if (glsl_type_is_sampler(type_no_array)) {
         bool init_idx;
         unsigned sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index,
                           &init_idx);

         /* Samplers (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
                 i++) {
               stage_program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      } else if (glsl_type_is_image(type_no_array)) {
         /* @FIXME: image_index should match that of the same image
          * uniform in other shaders. This means we need to match image
          * uniforms by location (GLSL does it by variable name, but we
          * want to avoid that).
          */
         int image_index = state->next_image_index;
         state->next_image_index += entries;

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers */
         enum gl_access_qualifier image_access =
            state->current_var->data.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            stage_program->sh.ImageAccess[i] = access;
         }

         if (!uniform->is_shader_storage)
            state->num_shader_uniform_components += values;
      } else {
         if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
            uniform->opaque[stage].index = state->next_subroutine;
            uniform->opaque[stage].active = true;

            prog->_LinkedShaders[stage]->Program->sh.NumSubroutineUniforms++;

            /* Increment the subroutine index by 1 for non-arrays and by the
             * number of array elements for arrays.
             */
            state->next_subroutine += MAX2(1, uniform->array_elements);
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      }

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      if (name) {
         _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
                                 (void *) (intptr_t)
                                    (prog->data->NumUniformStorage - 1));
      }

      if (!is_gl_identifier(uniform->name) && !uniform->is_shader_storage &&
          !state->var_is_in_block)
         state->num_values += values;

      return MAX2(uniform->array_elements, 1);
   }
}
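/* Entry point for the GL linker: walks every uniform variable of every
 * linked stage, fills prog->data->UniformStorage, and finally builds the
 * remap tables and applies initializers. When fill_parameters is true the
 * uniforms are also mirrored into each stage's Parameters list.
 */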
bool
gl_nir_link_uniforms(struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders */
   struct nir_link_uniforms_state state = {0,};
   state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                                _mesa_key_string_equal);

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

      state.referenced_uniforms =
         _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                 _mesa_key_pointer_equal);
      state.next_image_index = 0;
      state.next_sampler_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_storage_blocks_write_access = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      add_var_use_shader(nir, state.referenced_uniforms);

      nir_foreach_variable(var, &nir->uniforms) {
         state.current_var = var;
         state.current_ifc_type = NULL;
         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.set_top_level_array = false;
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /* From ARB_program_interface spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          * For example, in the following code:
          *
          * uniform Block1 {
          *    float member1;
          * };
          * uniform Block2 {
          *    float member2;
          * } instance2;
          * uniform Block3 {
          *    float member3;
          * } instance3[2]; // uses two separate buffer bindings
          *
          * the three uniforms (if active) are enumerated as "member1",
          * "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of ubo, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a ubo, or the variables of a ssbo, we need to treat
          * arrays of instance as a single block.
          */
         char *name;
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block &&
             ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
              (prog->data->spirv && type == var->interface_type))) {
            type = glsl_without_array(var->type);
            state.current_ifc_type = type;
            name = ralloc_strdup(NULL, glsl_get_type_name(type));
         } else {
            state.set_top_level_array = true;
            name = ralloc_strdup(NULL, var->name);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int location = var->data.location;

         struct gl_uniform_block *blocks = NULL;
         unsigned num_blocks = 0;
         int buffer_block_index = -1;
         if (!prog->data->spirv && state.var_is_in_block) {
            /* If the uniform is inside a uniform block determine its block
             * index by comparing the bindings; we can not use names.
             */
            blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
            num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

            bool is_interface_array =
               glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
               glsl_type_is_array(state.current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state.current_var->interface_type);

            if (is_interface_array) {
               unsigned l = strlen(ifc_name);

               /* Even when a match is found, do not "break" here. As this is
                * an array of instances, all elements of the array need to be
                * marked as referenced.
                */
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     if (buffer_block_index == -1)
                        buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry) {
                        BITSET_WORD *bits = (BITSET_WORD *) entry->data;
                        if (BITSET_TEST(bits, blocks[i].linearized_array_index))
                           blocks[i].stageref |= 1U << shader_type;
                     }
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }
            }

            if (nir_variable_is_in_ssbo(var) &&
                !(var->data.access & ACCESS_NON_WRITEABLE)) {
               unsigned array_size = is_interface_array ?
                  glsl_get_length(var->type) : 1;

               STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);

               /* Shaders that use too many SSBOs will fail to compile, which
                * we don't care about.
                *
                * This is true for shaders that do not use too many SSBOs:
                */
               if (buffer_block_index + array_size <= 32) {
                  state.shader_storage_blocks_write_access |=
                     u_bit_consecutive(buffer_block_index, array_size);
               }
            }
         }
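         /* shader_storage_blocks_write_access is a per-stage bitmask with
          * one bit per shader storage block (hence the <= 32 guard above);
          * bit i set means block i may be written by this stage.
          */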
         if (!prog->data->spirv && state.var_is_in_block &&
             glsl_without_array(state.current_var->type) != state.current_var->interface_type) {

            bool found = false;
            char sentinel = '\0';

            if (glsl_type_is_struct(state.current_var->type)) {
               sentinel = '.';
            } else if (glsl_type_is_array(state.current_var->type) &&
                       (glsl_type_is_array(glsl_get_array_element(state.current_var->type))
                        || glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
               sentinel = '[';
            }

            const unsigned l = strlen(state.current_var->name);
            for (unsigned i = 0; i < num_blocks; i++) {
               for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
                  if (sentinel) {
                     const char *begin = blocks[i].Uniforms[j].Name;
                     const char *end = strchr(begin, sentinel);

                     if (end == NULL)
                        continue;

                     if ((ptrdiff_t) l != (end - begin))
                        continue;

                     found = strncmp(state.current_var->name, begin, l) == 0;
                  } else {
                     found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
                  }

                  if (found) {
                     location = j;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }

               if (found)
                  break;
            }
            assert(found);

            const struct gl_uniform_block *const block =
               &blocks[buffer_block_index];
            assert(location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[location];

            state.offset = ubo_var->Offset;
            var->data.location = location;
         }

         /* Check if the uniform has been processed already for
          * other stage. If so, validate they are compatible and update
          * the active stage mask.
          */
         if (find_and_update_previous_uniform_storage(ctx, prog, &state, var,
                                                      name, type, shader_type)) {
            ralloc_free(name);
            free_type_tree(type_tree);
            continue;
         }

         /* From now on the variable’s location will be its uniform index */
         if (!state.var_is_in_block)
            var->data.location = prog->data->NumUniformStorage;
         else
            location = -1;

         bool row_major =
            var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
         int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
                                    0, location,
                                    &state,
                                    !prog->data->spirv ? &name : NULL,
                                    !prog->data->spirv ? strlen(name) : 0,
                                    row_major);

         free_type_tree(type_tree);
         ralloc_free(name);

         if (res == -1)
            return false;
      }

      _mesa_hash_table_destroy(state.referenced_uniforms, NULL);

      if (state.num_shader_samplers >
          ctx->Const.Program[shader_type].MaxTextureImageUnits) {
         linker_error(prog, "Too many %s shader texture samplers\n",
                      _mesa_shader_stage_to_string(shader_type));
         continue;
      }

      if (state.num_shader_images >
          ctx->Const.Program[shader_type].MaxImageUniforms) {
         linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
                      _mesa_shader_stage_to_string(shader_type),
                      state.num_shader_images,
                      ctx->Const.Program[shader_type].MaxImageUniforms);
         continue;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->Program->sh.ShaderStorageBlocksWriteAccess =
         state.shader_storage_blocks_write_access;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->data->NumUniformDataSlots = state.num_values;

   if (prog->data->spirv)
      prog->NumUniformRemapTable = state.max_uniform_location;

   nir_setup_uniform_remap_tables(ctx, prog);
   gl_nir_set_uniform_initializers(ctx, prog);

   _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);

   return true;
}