/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "main/context.h"
#include "main/mtypes.h"

/**
 * This file does the common linking of GLSL uniforms, using NIR instead of
 * IR, as the counterpart of glsl/link_uniforms.cpp.
 */

#define UNMAPPED_UNIFORM_LOC ~0u

/**
 * Built-in / reserved GL variable names start with "gl_"
 */
static inline bool
is_gl_identifier(const char *s)
{
   return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
}

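/* Builds prog->UniformRemapTable (and the subroutine remap tables), mapping
 * GL uniform locations back to gl_uniform_storage entries, and allocates the
 * backing constant storage. Uniforms with explicit locations are reserved
 * first; the remaining ones are then packed into the free slots.
 *
 * An illustrative sketch (hypothetical shader, not taken from this file):
 *
 *    layout(location = 2) uniform float a;  // reserves remap slot 2
 *    uniform vec4 b[3];                     // later gets the first free
 *                                           // span of 3 consecutive slots
 */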
static void
nir_setup_uniform_remap_tables(struct gl_context *ctx,
                               struct gl_shader_program *prog)
{
   unsigned total_entries = prog->NumExplicitUniformLocations;

   /* For glsl this may have been allocated by reserve_explicit_locations() so
    * that we can keep track of unused uniforms with explicit locations.
    */
   assert(!prog->data->spirv ||
          (prog->data->spirv && !prog->UniformRemapTable));
   if (!prog->UniformRemapTable) {
      prog->UniformRemapTable = rzalloc_array(prog,
                                              struct gl_uniform_storage *,
                                              prog->NumUniformRemapTable);
   }

   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for the rest of the uniforms. */
   if (prog->data->spirv)
      link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* Add new entries to the total amount for checking against
       * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform
       * block (-1), because locations of uniform block entries are not
       * assignable.
       */
      if (prog->data->UniformStorage[i].block_index == -1)
         total_entries += entries;

      unsigned location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* Resize the remap table to fit the new entries. */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* Set the base location in the remap table for the uniform. */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      if (uniform->block_index == -1)
         uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         if (uniform->block_index == -1)
            data_pos += num_slots;
      }
   }

   /* Verify that the total number of entries for explicit and implicit
    * locations is less than MAX_UNIFORM_LOCATIONS.
    */
   if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
      linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
                   "(%u > %u)", total_entries,
                   ctx->Const.MaxUserAssignableUniformLocations);
   }

   /* Reserve all the explicit locations of the active subroutine uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         /* Set remap table entries to point to the correct
          * gl_uniform_storage.
          */
         for (unsigned k = 0; k < entries; k++) {
            unsigned element_loc =
               prog->data->UniformStorage[i].remap_location + k;
            p->sh.SubroutineUniformRemapTable[element_loc] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
      }
   }

   /* Reserve subroutine locations. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location !=
          UNMAPPED_UNIFORM_LOC)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         p->sh.SubroutineUniformRemapTable =
            reralloc(p,
                     p->sh.SubroutineUniformRemapTable,
                     struct gl_uniform_storage *,
                     p->sh.NumSubroutineUniformRemapTable + entries);

         for (unsigned k = 0; k < entries; k++) {
            p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
         prog->data->UniformStorage[i].remap_location =
            p->sh.NumSubroutineUniformRemapTable;
         p->sh.NumSubroutineUniformRemapTable += entries;
      }
   }
}

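/* Records in @live that the variable behind @deref is used, tracking which
 * array elements are accessed so that unused elements can be trimmed later.
 *
 * A sketch of what gets recorded (hypothetical declaration, for
 * illustration): for "uniform vec4 u[4][2]", the access u[1][i] with a
 * dynamic inner index is stored as {index = 1, size = 4} followed by
 * {index = 2, size = 2}, where index == size means "any element".
 */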
static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       deref->mode & ~(nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix dereferences. */
         if (!glsl_type_is_array(deref_type))
            break;

         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO. We can't
             * track accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);

   /* Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *bits = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      bits = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));
   }

   if (entry)
      bits = (BITSET_WORD *) entry->data;

   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs, array_depth,
                                               bits);
   }

   assert(deref->mode == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var, bits);
}

/* Iterate over the shader and collect information about uniform use. */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type == nir_instr_type_intrinsic) {
                  nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                  switch (intr->intrinsic) {
                  case nir_intrinsic_atomic_counter_read_deref:
                  case nir_intrinsic_atomic_counter_inc_deref:
                  case nir_intrinsic_atomic_counter_pre_dec_deref:
                  case nir_intrinsic_atomic_counter_post_dec_deref:
                  case nir_intrinsic_atomic_counter_add_deref:
                  case nir_intrinsic_atomic_counter_min_deref:
                  case nir_intrinsic_atomic_counter_max_deref:
                  case nir_intrinsic_atomic_counter_and_deref:
                  case nir_intrinsic_atomic_counter_or_deref:
                  case nir_intrinsic_atomic_counter_xor_deref:
                  case nir_intrinsic_atomic_counter_exchange_deref:
                  case nir_intrinsic_atomic_counter_comp_swap_deref:
                  case nir_intrinsic_image_deref_load:
                  case nir_intrinsic_image_deref_store:
                  case nir_intrinsic_image_deref_atomic_add:
                  case nir_intrinsic_image_deref_atomic_umin:
                  case nir_intrinsic_image_deref_atomic_imin:
                  case nir_intrinsic_image_deref_atomic_umax:
                  case nir_intrinsic_image_deref_atomic_imax:
                  case nir_intrinsic_image_deref_atomic_and:
                  case nir_intrinsic_image_deref_atomic_or:
                  case nir_intrinsic_image_deref_atomic_xor:
                  case nir_intrinsic_image_deref_atomic_exchange:
                  case nir_intrinsic_image_deref_atomic_comp_swap:
                  case nir_intrinsic_image_deref_size:
                  case nir_intrinsic_image_deref_samples:
                  case nir_intrinsic_load_deref:
                  case nir_intrinsic_store_deref:
                     add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                       &derefs, &derefs_size);
                     break;

                  default:
                     /* Nothing to do. */
                     break;
                  }
               } else if (instr->type == nir_instr_type_tex) {
                  nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
                  int sampler_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_sampler_deref);
                  int texture_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_texture_deref);

                  if (sampler_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[sampler_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }

                  if (texture_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[texture_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}

/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
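/* For example (a hypothetical shader, purely for illustration):
 *
 *    struct S { sampler2D tex; float f; };
 *    uniform S s[2];
 *
 * builds the tree
 *
 *    array(size 2) -> struct -> { sampler2D, float }
 *
 * Both s[0].tex and s[1].tex resolve to the same sampler2D node, so the
 * second visit reuses the index range reserved by the first and the two
 * samplers land on consecutive indices.
 */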
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};

struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;
   unsigned next_subroutine;

   /* per-shader stage */
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
   struct hash_table *referenced_uniforms;
   struct hash_table *uniform_hash;
};

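/* Mirrors the uniform into the gl_program_parameter_list consumed by the
 * driver. One parameter is added per vector; e.g. a hypothetical
 * "uniform mat3 m[2]" yields 2 array elements * 3 matrix columns = 6
 * parameters.
 */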
static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage || glsl_contains_opaque(type))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}

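/* Returns the base opaque index (sampler/image unit, etc.) for the member
 * described by state->current_type. On the first visit the range needed by
 * all enclosing arrays is reserved from *next_index and *initialised is set;
 * later visits reuse the cached index so that array members of the same
 * opaque type end up on consecutive units.
 */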
static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   /* If we’ve already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else
      *initialised = false;

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}

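/* GLSL path of find_and_update_previous_uniform_storage(): looks *name up in
 * the hash of uniforms recorded for earlier stages, updating the per-stage
 * info of any match. Composite types are unrolled member by member,
 * rewriting *name in place (e.g. "s" -> "s.tex" -> "s.f", names made up for
 * illustration) so that leaf names match the entries created by
 * nir_link_uniform().
 */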
static bool
find_and_update_named_uniform_storage(struct gl_context *ctx,
                                      struct gl_shader_program *prog,
                                      struct nir_link_uniforms_state *state,
                                      nir_variable *var, char **name,
                                      size_t name_length,
                                      const struct glsl_type *type,
                                      unsigned stage, bool *first_element)
{
   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {

      struct type_tree_entry *old_type = state->current_type;
      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      bool result = false;
      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name. */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         result = find_and_update_named_uniform_storage(ctx, prog, state,
                                                        var, name, new_length,
                                                        field_type, stage,
                                                        first_element);

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;

         if (!result) {
            state->current_type = old_type;
            return false;
         }
      }

      state->current_type = old_type;

      return result;
   } else {
      struct hash_entry *entry =
         _mesa_hash_table_search(state->uniform_hash, *name);
      if (entry) {
         unsigned i = (unsigned) (intptr_t) entry->data;
         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

         if (*first_element && !state->var_is_in_block) {
            *first_element = false;
            var->data.location = uniform - prog->data->UniformStorage;
         }

         unsigned values = glsl_get_component_slots(type);
         const struct glsl_type *type_no_array = glsl_without_array(type);
         if (glsl_type_is_sampler(type_no_array)) {
            struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
            bool init_idx;
            unsigned sampler_index =
               get_next_index(state, uniform, &state->next_sampler_index,
                              &init_idx);

            /* Samplers (bound or bindless) are counted as two components as
             * specified by ARB_bindless_texture.
             */
            state->num_shader_samplers += values / 2;

            uniform->opaque[stage].active = true;
            uniform->opaque[stage].index = sampler_index;

            if (init_idx) {
               const unsigned shadow =
                  glsl_sampler_type_is_shadow(type_no_array);
               for (unsigned i = sampler_index;
                    i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
                    i++) {
                  sh->Program->sh.SamplerTargets[i] =
                     glsl_get_sampler_target(type_no_array);
                  state->shader_samplers_used |= 1U << i;
                  state->shader_shadow_samplers |= shadow << i;
               }
            }
         } else if (glsl_type_is_image(type_no_array)) {
            struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
            int image_index = state->next_image_index;
            /* TODO: handle structs when bindless support is added */
            state->next_image_index += MAX2(1, uniform->array_elements);

            /* Images (bound or bindless) are counted as two components as
             * specified by ARB_bindless_texture.
             */
            state->num_shader_images += values / 2;

            uniform->opaque[stage].active = true;
            uniform->opaque[stage].index = image_index;

            /* Set image access qualifiers. */
            enum gl_access_qualifier image_access =
               state->current_var->data.access;
            const GLenum access =
               (image_access & ACCESS_NON_WRITEABLE) ?
               ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                       GL_READ_ONLY) :
               ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                       GL_READ_WRITE);
            for (unsigned i = image_index;
                 i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
                 i++) {
               sh->Program->sh.ImageAccess[i] = access;
            }
         }

         struct hash_entry *entry =
            _mesa_hash_table_search(state->referenced_uniforms,
                                    state->current_var);
         if (entry != NULL ||
             glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
            uniform->active_shader_mask |= 1 << stage;

         if (!state->var_is_in_block)
            add_parameter(uniform, ctx, prog, type, state);

         return true;
      }
   }

   return false;
}

/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var. For GLSL this is done using the name; for
 * SPIR-V it is in general done using the explicit location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   can be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped. They will be handled and assigned a location later.
 */
static bool
find_and_update_previous_uniform_storage(struct gl_context *ctx,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(ctx, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      bool result = false;
      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from UBOs and SSBOs. */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
               result = true;
            }
         }
      }

      if (result)
         var->data.location = uniform - prog->data->UniformStorage;
      return result;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without
    * location (e.g. atomic counters) that would have an initial location
    * equal to -1. We just return in that case. Those uniforms will be
    * handled later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders that have a
    * lot of uniforms. Would it be better to use some kind of hash table?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, ctx, prog, var->type, state);
         return true;
      }
   }

   return false;
}

static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}

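/* enter_record()/leave_record() align state->offset to the struct's base
 * alignment on the way in and out, following the std140/std430 layout rules.
 * For example, under std140 a struct's base alignment is rounded up to 16
 * bytes, so a struct member that follows a lone float is pushed to the next
 * 16-byte boundary.
 */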
static void
enter_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

static void
leave_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state,
                 char **name, size_t name_length, bool row_major)
{
   struct gl_uniform_storage *uniform = NULL;

   if (state->set_top_level_array &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member. */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types). */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }

      state->set_top_level_array = false;
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         enter_record(state, ctx, type, row_major);

      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;
         bool field_row_major = row_major;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               if (prog->data->spirv) {
                  state->offset =
                     struct_base_offset + glsl_get_struct_field_offset(type, i);
               } else if (glsl_get_struct_field_offset(type, i) != -1 &&
                          type == state->current_ifc_type) {
                  state->offset = glsl_get_struct_field_offset(type, i);
               }

               if (glsl_type_is_interface(type))
                  state->set_top_level_array = true;
            }

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }

            /* The layout of structures at the top level of the block is set
             * during parsing. For matrices contained in multiple levels of
             * structures in the block, the inner structures have no layout.
             * These cases must potentially inherit the layout from the outer
             * levels.
             */
            const enum glsl_matrix_layout matrix_layout =
               glsl_get_struct_field_data(type, i)->matrix_layout;
            if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
               field_row_major = true;
            } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
               field_row_major = false;
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name. */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, i, location,
                                        state, name, new_length,
                                        field_row_major);

         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         leave_record(state, ctx, type, row_major);

      state->current_type = old_type;

      return location_count;
   } else {
      /* Create a new uniform storage entry. */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members. */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));

      uniform->name =
         name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      struct hash_entry *entry =
         _mesa_hash_table_search(state->referenced_uniforms,
                                 state->current_var);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
         uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location. */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);

      /* Set fields whose default value depends on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       *  as an array of basic types, zero is written to params. For active
       *  variables not backed by a buffer object, -1 is written to params,
       *  regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not
       *  declared as a matrix or array of matrices, zero is written to
       *  params. For active variables not backed by a buffer object, -1 is
       *  written to params, regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by a
       *  buffer object, declared as a single matrix or array of matrices, and
       *  stored in row-major order, one is written to params. For all other
       *  active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }

         if (!prog->data->spirv) {
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);

            unsigned alignment =
               glsl_get_std140_base_alignment(type, uniform->row_major);
            if (packing == GLSL_INTERFACE_PACKING_STD430) {
               alignment =
                  glsl_get_std430_base_alignment(type, uniform->row_major);
            }
            state->offset = glsl_align(state->offset, alignment);
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index by
       * comparing the bindings; we cannot use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         if (!prog->data->spirv) {
            bool is_interface_array =
               glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
               glsl_type_is_array(state->current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state->current_var->interface_type);
            if (is_interface_array) {
               unsigned l = strlen(ifc_name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     buffer_block_index = i;
                     break;
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;
                     break;
                  }
               }
            }

            /* Compute the next offset. */
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);
            if (packing == GLSL_INTERFACE_PACKING_STD430)
               state->offset += glsl_get_std430_size(type, uniform->row_major);
            else
               state->offset += glsl_get_std140_size(type, uniform->row_major);
         } else {
            for (unsigned i = 0; i < num_blocks; i++) {
               if (state->current_var->data.binding == blocks[i].Binding) {
                  buffer_block_index = i;
                  break;
               }
            }

            /* Compute the next offset. */
            state->offset += glsl_get_explicit_size(type, true);
         }
         assert(buffer_block_index >= 0);
      }

      uniform->block_index = buffer_block_index;

      /* @FIXME: the initialization of the following will be done as we
       * implement support for their specific features, like SSBO, atomics,
       * etc.
       */
      uniform->builtin = is_gl_identifier(uniform->name);
      uniform->atomic_buffer_index = -1;
      uniform->is_bindless = false;

      /* The following are for features not supported by ARB_gl_spirv. */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      if (glsl_type_is_sampler(type_no_array)) {
         bool init_idx;
         int sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index,
                           &init_idx);

         /* Samplers (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
                 i++) {
               stage_program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      } else if (glsl_type_is_image(type_no_array)) {
         /* @FIXME: image_index should match that of the same image
          * uniform in other shaders. This means we need to match image
          * uniforms by location (GLSL does it by variable name, but we
          * want to avoid that).
          */
         int image_index = state->next_image_index;
         state->next_image_index += entries;

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers. */
         enum gl_access_qualifier image_access =
            state->current_var->data.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            stage_program->sh.ImageAccess[i] = access;
         }

         if (!uniform->is_shader_storage)
            state->num_shader_uniform_components += values;
      } else {
         if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
            uniform->opaque[stage].index = state->next_subroutine;
            uniform->opaque[stage].active = true;

            prog->_LinkedShaders[stage]->Program->sh.NumSubroutineUniforms++;

            /* Increment the subroutine index by 1 for non-arrays and by the
             * number of array elements for arrays.
             */
            state->next_subroutine += MAX2(1, uniform->array_elements);
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      }

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      if (name) {
         _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
                                 (void *) (intptr_t)
                                    (prog->data->NumUniformStorage - 1));
      }

      if (!is_gl_identifier(uniform->name) && !uniform->is_shader_storage &&
          !state->var_is_in_block)
         state->num_values += values;

      return MAX2(uniform->array_elements, 1);
   }
}

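/* Entry point for uniform linking: walks the uniform variables of every
 * linked stage, fills prog->data->UniformStorage, and finally builds the
 * remap tables and applies the initializers. Returns false if allocating
 * uniform storage fails; other problems are reported via linker_error().
 */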
bool
gl_nir_link_uniforms(struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items. */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders. */
   struct nir_link_uniforms_state state = {0,};
   state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                                _mesa_key_string_equal);

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

      state.referenced_uniforms =
         _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                 _mesa_key_pointer_equal);
      state.next_image_index = 0;
      state.next_sampler_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_storage_blocks_write_access = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      add_var_use_shader(nir, state.referenced_uniforms);

      nir_foreach_variable(var, &nir->uniforms) {
         state.current_var = var;
         state.current_ifc_type = NULL;
         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.set_top_level_array = false;
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /*
          * From ARB_program_interface spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          * For example, in the following code:
          *
          * uniform Block1 {
          *    int member1;
          * };
          * uniform Block2 {
          *    int member2;
          * } instance2;
          * uniform Block3 {
          *    int member3;
          * } instance3[2]; // uses two separate buffer bindings
          *
          * the three uniforms (if active) are enumerated as "member1",
          * "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of ubo, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a ubo, or the variables of a ssbo, we need to treat
          * arrays of instances as a single block.
          */
         char *name;
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block &&
             ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
              (prog->data->spirv && type == var->interface_type))) {
            type = glsl_without_array(var->type);
            state.current_ifc_type = type;
            name = ralloc_strdup(NULL, glsl_get_type_name(type));
         } else {
            state.set_top_level_array = true;
            name = ralloc_strdup(NULL, var->name);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int location = var->data.location;

         struct gl_uniform_block *blocks;
         int num_blocks;
         int buffer_block_index = -1;
         if (!prog->data->spirv && state.var_is_in_block) {
            /* If the uniform is inside a uniform block determine its block
             * index by comparing the bindings; we cannot use names.
             */
            blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
            num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

            bool is_interface_array =
               glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
               glsl_type_is_array(state.current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state.current_var->interface_type);

            if (is_interface_array) {
               unsigned l = strlen(ifc_name);

               /* Even when a match is found, do not "break" here. As this is
                * an array of instances, all elements of the array need to be
                * marked as referenced.
                */
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     if (buffer_block_index == -1)
                        buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry) {
                        BITSET_WORD *bits = (BITSET_WORD *) entry->data;
                        if (BITSET_TEST(bits, blocks[i].linearized_array_index))
                           blocks[i].stageref |= 1U << shader_type;
                     }
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }
            }

            if (nir_variable_is_in_ssbo(var) &&
                !(var->data.access & ACCESS_NON_WRITEABLE)) {
               unsigned array_size = is_interface_array ?
                  glsl_get_length(var->type) : 1;

               STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);

               /* Shaders that use too many SSBOs will fail to compile, which
                * we don't care about.
                *
                * This is true for shaders that do not use too many SSBOs:
                */
               if (buffer_block_index + array_size <= 32) {
                  state.shader_storage_blocks_write_access |=
                     u_bit_consecutive(buffer_block_index, array_size);
               }
            }
         }

         if (!prog->data->spirv && state.var_is_in_block &&
             glsl_without_array(state.current_var->type) != state.current_var->interface_type) {

            bool found = false;
            char sentinel = '\0';

            if (glsl_type_is_struct(state.current_var->type)) {
               sentinel = '.';
            } else if (glsl_type_is_array(state.current_var->type) &&
                       (glsl_type_is_array(glsl_get_array_element(state.current_var->type))
                        || glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
               sentinel = '[';
            }

            const unsigned l = strlen(state.current_var->name);
            for (unsigned i = 0; i < num_blocks; i++) {
               for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
                  if (sentinel) {
                     const char *begin = blocks[i].Uniforms[j].Name;
                     const char *end = strchr(begin, sentinel);

                     if (end == NULL)
                        continue;

                     if ((ptrdiff_t) l != (end - begin))
                        continue;

                     found = strncmp(state.current_var->name, begin, l) == 0;
                  } else {
                     found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
                  }

                  if (found) {
                     location = j;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }

               if (found)
                  break;
            }
            assert(found);

            const struct gl_uniform_block *const block =
               &blocks[buffer_block_index];
            assert(location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[location];

            state.offset = ubo_var->Offset;
            var->data.location = location;
         }

         /* Check if the uniform has already been processed for another
          * stage. If so, validate that they are compatible and update the
          * active stage mask.
          */
         if (find_and_update_previous_uniform_storage(ctx, prog, &state, var,
                                                      name, type, shader_type)) {
            ralloc_free(name);
            free_type_tree(type_tree);
            continue;
         }

         /* From now on the variable’s location will be its uniform index. */
         if (!state.var_is_in_block)
            var->data.location = prog->data->NumUniformStorage;
         else
            location = -1;

         bool row_major =
            var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
         int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
                                    0, location,
                                    &state,
                                    !prog->data->spirv ? &name : NULL,
                                    !prog->data->spirv ? strlen(name) : 0,
                                    row_major);

         free_type_tree(type_tree);
         ralloc_free(name);

         if (res == -1)
            return false;
      }

      _mesa_hash_table_destroy(state.referenced_uniforms, NULL);

      if (state.num_shader_samplers >
          ctx->Const.Program[shader_type].MaxTextureImageUnits) {
         linker_error(prog, "Too many %s shader texture samplers\n",
                      _mesa_shader_stage_to_string(shader_type));
         continue;
      }

      if (state.num_shader_images >
          ctx->Const.Program[shader_type].MaxImageUniforms) {
         linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
                      _mesa_shader_stage_to_string(shader_type),
                      state.num_shader_images,
                      ctx->Const.Program[shader_type].MaxImageUniforms);
         continue;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->Program->sh.ShaderStorageBlocksWriteAccess =
         state.shader_storage_blocks_write_access;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->data->NumUniformDataSlots = state.num_values;

   if (prog->data->spirv)
      prog->NumUniformRemapTable = state.max_uniform_location;

   nir_setup_uniform_remap_tables(ctx, prog);
   gl_nir_set_uniform_initializers(ctx, prog);

   _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);

   return true;
}