glsl: remove some duplicate code from the nir uniform linker
[mesa.git] / src / compiler / glsl / gl_nir_link_uniforms.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "main/context.h"
#include "main/mtypes.h"

/**
 * This file does the common linking for GLSL uniforms, using NIR, as the
 * counterpart to the IR-based glsl/link_uniforms.cpp.
 */

#define UNMAPPED_UNIFORM_LOC ~0u

/**
 * Built-in / reserved GL variable names start with "gl_".
 */
static inline bool
is_gl_identifier(const char *s)
{
   return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
}

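/**
 * Build the tables that map GL uniform locations back to gl_uniform_storage
 * entries: first every explicit location is reserved, then the remaining
 * uniforms are packed into the holes or appended at the end.
 *
 * As a hypothetical example, "layout(location = 2) uniform float u[3];"
 * reserves locations 2..4, and all three remap-table entries point at the
 * same gl_uniform_storage, so a glUniform1f(4, ...) call resolves to element
 * 2 of u.
 */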
static void
nir_setup_uniform_remap_tables(struct gl_context *ctx,
                               struct gl_shader_program *prog)
{
   unsigned total_entries = prog->NumExplicitUniformLocations;

   /* For glsl this may have been allocated by reserve_explicit_locations() so
    * that we can keep track of unused uniforms with explicit locations.
    */
   assert(!prog->data->spirv || !prog->UniformRemapTable);
   if (!prog->UniformRemapTable) {
      prog->UniformRemapTable = rzalloc_array(prog,
                                              struct gl_uniform_storage *,
                                              prog->NumUniformRemapTable);
   }

   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set the remap table entries to point to the correct
       * gl_uniform_storage.
       */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for the rest of the uniforms. */
   if (prog->data->spirv)
      link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* Add the new entries to the total amount checked against
       * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform
       * block (-1), because locations of uniform block entries are not
       * assignable.
       */
      if (prog->data->UniformStorage[i].block_index == -1)
         total_entries += entries;

      unsigned location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* Resize the remap table to fit the new entries. */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* Set the base location in the remap table for the uniform. */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      if (uniform->block_index == -1)
         uniform->storage = &data[data_pos];

      /* Set the remap table entries to point to the correct
       * gl_uniform_storage.
       */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         if (uniform->block_index == -1)
            data_pos += num_slots;
      }
   }

   /* Verify that the total number of entries for explicit and implicit
    * locations is less than MAX_UNIFORM_LOCATIONS.
    */
   if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
      linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
                   "(%u > %u)", total_entries,
                   ctx->Const.MaxUserAssignableUniformLocations);
   }

   /* Reserve all the explicit locations of the active subroutine uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         /* Set the remap table entries to point to the correct
          * gl_uniform_storage.
          */
         for (unsigned k = 0; k < entries; k++) {
            unsigned element_loc =
               prog->data->UniformStorage[i].remap_location + k;
            p->sh.SubroutineUniformRemapTable[element_loc] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
      }
   }

   /* Reserve subroutine locations. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location !=
          UNMAPPED_UNIFORM_LOC)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         p->sh.SubroutineUniformRemapTable =
            reralloc(p,
                     p->sh.SubroutineUniformRemapTable,
                     struct gl_uniform_storage *,
                     p->sh.NumSubroutineUniformRemapTable + entries);

         for (unsigned k = 0; k < entries; k++) {
            p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
         prog->data->UniformStorage[i].remap_location =
            p->sh.NumSubroutineUniformRemapTable;
         p->sh.NumSubroutineUniformRemapTable += entries;
      }
   }
}

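/* Record in "live" which uniform, UBO, or SSBO variables a deref chain
 * touches, and for array variables which elements are accessed. An indirect
 * index marks the whole array dimension as used: for a hypothetical
 * "uniform vec4 u[8];", a load of u[3] marks only element 3, while u[i]
 * with a dynamic i marks all eight.
 */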
static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       deref->mode & ~(nir_var_uniform | nir_var_mem_ubo | nir_var_mem_ssbo)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix dereferences. */
         if (!glsl_type_is_array(deref_type))
            break;

         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO. We can't
             * track accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);

   /** Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *bits = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      bits = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));
   }

   if (entry)
      bits = (BITSET_WORD *) entry->data;

   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs, array_depth,
                                               bits);
   }

   assert(deref->mode == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var, bits);
}

/* Iterate over the shader and collect information about uniform use. */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type == nir_instr_type_intrinsic) {
                  nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                  switch (intr->intrinsic) {
                  case nir_intrinsic_atomic_counter_read_deref:
                  case nir_intrinsic_atomic_counter_inc_deref:
                  case nir_intrinsic_atomic_counter_pre_dec_deref:
                  case nir_intrinsic_atomic_counter_post_dec_deref:
                  case nir_intrinsic_atomic_counter_add_deref:
                  case nir_intrinsic_atomic_counter_min_deref:
                  case nir_intrinsic_atomic_counter_max_deref:
                  case nir_intrinsic_atomic_counter_and_deref:
                  case nir_intrinsic_atomic_counter_or_deref:
                  case nir_intrinsic_atomic_counter_xor_deref:
                  case nir_intrinsic_atomic_counter_exchange_deref:
                  case nir_intrinsic_atomic_counter_comp_swap_deref:
                  case nir_intrinsic_image_deref_load:
                  case nir_intrinsic_image_deref_store:
                  case nir_intrinsic_image_deref_atomic_add:
                  case nir_intrinsic_image_deref_atomic_umin:
                  case nir_intrinsic_image_deref_atomic_imin:
                  case nir_intrinsic_image_deref_atomic_umax:
                  case nir_intrinsic_image_deref_atomic_imax:
                  case nir_intrinsic_image_deref_atomic_and:
                  case nir_intrinsic_image_deref_atomic_or:
                  case nir_intrinsic_image_deref_atomic_xor:
                  case nir_intrinsic_image_deref_atomic_exchange:
                  case nir_intrinsic_image_deref_atomic_comp_swap:
                  case nir_intrinsic_image_deref_size:
                  case nir_intrinsic_image_deref_samples:
                  case nir_intrinsic_load_deref:
                  case nir_intrinsic_store_deref:
                     add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                       &derefs, &derefs_size);
                     break;

                  default:
                     /* Nothing to do. */
                     break;
                  }
               } else if (instr->type == nir_instr_type_tex) {
                  nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
                  int sampler_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_sampler_deref);
                  int texture_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_texture_deref);

                  if (sampler_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[sampler_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }

                  if (texture_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[texture_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}

/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
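/* For example (a hypothetical shader), given
 *
 *    struct S { sampler2D a; sampler2D b; };
 *    uniform S u[3];
 *
 * the tree is a root entry with array_size 3 whose single child represents
 * the struct, which in turn has one child per member. All three array
 * elements walk the same child nodes, so the samplers of u[*].a receive
 * indices 0..2 and those of u[*].b indices 3..5.
 */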
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};

struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;

   /* per-shader stage */
   unsigned next_bindless_image_index;
   unsigned next_bindless_sampler_index;
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned next_subroutine;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
   struct hash_table *referenced_uniforms;
   struct hash_table *uniform_hash;
};

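/* Create gl_program_parameter entries mirroring this uniform so that the
 * driver's parameter list and the GL uniform storage stay in sync. Nothing
 * is added for shader storage variables or for opaque types that are not
 * bindless, since those never live in the parameter list.
 */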
static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage ||
       (glsl_contains_opaque(type) && !state->current_var->data.bindless))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}

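/* Reserve (on first encounter) or reuse the opaque index range for the
 * current type-tree node; see the type_tree_entry comment above for how this
 * keeps array members sequential. UINT_MAX in next_index means the node has
 * not been visited yet.
 */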
static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   if (state->current_type->next_index == UINT_MAX) {
      /* We haven’t encountered this member yet, so reserve enough indices
       * for all of the arrays enclosing it.
       */
      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else {
      /* We have already calculated an index for this member, so we can just
       * offset from there.
       */
      *initialised = false;
   }

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}

/* Update the uniforms info for the current shader stage. */
static void
update_uniforms_shader_info(struct gl_shader_program *prog,
                            struct nir_link_uniforms_state *state,
                            struct gl_uniform_storage *uniform,
                            const struct glsl_type *type,
                            unsigned stage)
{
   unsigned values = glsl_get_component_slots(type);
   const struct glsl_type *type_no_array = glsl_without_array(type);

   if (glsl_type_is_sampler(type_no_array)) {
      bool init_idx;
      unsigned *next_index = state->current_var->data.bindless ?
         &state->next_bindless_sampler_index :
         &state->next_sampler_index;
      int sampler_index = get_next_index(state, uniform, next_index, &init_idx);
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      if (state->current_var->data.bindless) {
         if (init_idx) {
            sh->Program->sh.BindlessSamplers =
               rerzalloc(sh->Program, sh->Program->sh.BindlessSamplers,
                         struct gl_bindless_sampler,
                         sh->Program->sh.NumBindlessSamplers,
                         state->next_bindless_sampler_index);

            for (unsigned j = sh->Program->sh.NumBindlessSamplers;
                 j < state->next_bindless_sampler_index; j++) {
               sh->Program->sh.BindlessSamplers[j].target =
                  glsl_get_sampler_target(type_no_array);
            }

            sh->Program->sh.NumBindlessSamplers =
               state->next_bindless_sampler_index;
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      } else {
         /* Samplers (bound or bindless) are counted as two components
          * as specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS); i++) {
               sh->Program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = sampler_index;
   } else if (glsl_type_is_image(type_no_array)) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      /* Set image access qualifiers. */
      enum gl_access_qualifier image_access =
         state->current_var->data.access;
      const GLenum access =
         (image_access & ACCESS_NON_WRITEABLE) ?
         ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                 GL_READ_ONLY) :
         ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                 GL_READ_WRITE);

      int image_index;
      if (state->current_var->data.bindless) {
         image_index = state->next_bindless_image_index;
         state->next_bindless_image_index += MAX2(1, uniform->array_elements);

         sh->Program->sh.BindlessImages =
            rerzalloc(sh->Program, sh->Program->sh.BindlessImages,
                      struct gl_bindless_image,
                      sh->Program->sh.NumBindlessImages,
                      state->next_bindless_image_index);

         for (unsigned j = sh->Program->sh.NumBindlessImages;
              j < state->next_bindless_image_index; j++) {
            sh->Program->sh.BindlessImages[j].access = access;
         }

         sh->Program->sh.NumBindlessImages = state->next_bindless_image_index;
      } else {
         image_index = state->next_image_index;
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS); i++) {
            sh->Program->sh.ImageAccess[i] = access;
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = image_index;

      if (!uniform->is_shader_storage)
         state->num_shader_uniform_components += values;
   } else {
      if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

         uniform->opaque[stage].index = state->next_subroutine;
         uniform->opaque[stage].active = true;

         sh->Program->sh.NumSubroutineUniforms++;

         /* Increment the subroutine index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         state->next_subroutine += MAX2(1, uniform->array_elements);
      }

      if (!state->var_is_in_block)
         state->num_shader_uniform_components += values;
   }
}

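/* Look up, by fully-expanded name, uniforms already added to UniformStorage
 * while processing an earlier stage, and update their per-stage info. The
 * recursion mirrors nir_link_uniform below: structs append ".field" and
 * arrays append "[i]" to the name before probing the uniform_hash table.
 */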
static bool
find_and_update_named_uniform_storage(struct gl_context *ctx,
                                      struct gl_shader_program *prog,
                                      struct nir_link_uniforms_state *state,
                                      nir_variable *var, char **name,
                                      size_t name_length,
                                      const struct glsl_type *type,
                                      unsigned stage, bool *first_element)
{
   /* gl_uniform_storage can cope with one level of array, so if the type is
    * a composite type or an array where each element occupies more than one
    * location, then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {

      struct type_tree_entry *old_type = state->current_type;
      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      bool result = false;
      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name. */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         result = find_and_update_named_uniform_storage(ctx, prog, state,
                                                        var, name, new_length,
                                                        field_type, stage,
                                                        first_element);

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;

         if (!result) {
            state->current_type = old_type;
            return false;
         }
      }

      state->current_type = old_type;

      return result;
   } else {
      struct hash_entry *entry =
         _mesa_hash_table_search(state->uniform_hash, *name);
      if (entry) {
         unsigned i = (unsigned) (intptr_t) entry->data;
         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

         if (*first_element && !state->var_is_in_block) {
            *first_element = false;
            var->data.location = uniform - prog->data->UniformStorage;
         }

         update_uniforms_shader_info(prog, state, uniform, type, stage);

         const struct glsl_type *type_no_array = glsl_without_array(type);
         struct hash_entry *entry =
            _mesa_hash_table_search(state->referenced_uniforms,
                                    state->current_var);
         if (entry != NULL ||
             glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
            uniform->active_shader_mask |= 1 << stage;

         if (!state->var_is_in_block)
            add_parameter(uniform, ctx, prog, type, state);

         return true;
      }
   }

   return false;
}

/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var. For GLSL this is done using the name; for
 * SPIR-V it is in general done using the explicit location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   can be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped here. They will be handled and assigned a location
 *   later.
 */
static bool
find_and_update_previous_uniform_storage(struct gl_context *ctx,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(ctx, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      bool result = false;
      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos. */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
               result = true;
            }
         }
      }

      if (result)
         var->data.location = uniform - prog->data->UniformStorage;
      return result;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without a
    * location (i.e. atomic counters) that would have an initial location
    * equal to -1. We just return in that case. Those uniforms will be
    * handled later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms. Would it be better to use some kind of hash table?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, ctx, prog, var->type, state);
         return true;
      }
   }

   return false;
}

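/* Build the type_tree_entry tree described above for @type. The tree is
 * allocated with plain malloc, so callers must release it with
 * free_type_tree() once the uniform has been processed.
 */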
static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}

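/* Align state->offset to the struct's base alignment before laying out its
 * members, following the block's packing rules. Note that std140 rounds a
 * structure's base alignment up to that of a vec4 while std430 does not,
 * which is why the two paths below differ.
 */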
static void
enter_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

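/* Re-align state->offset when leaving the struct: under both std140 and
 * std430 a structure's size is padded up to a multiple of its base
 * alignment, so the next member starts on an aligned boundary.
 */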
static void
leave_record(struct nir_link_uniforms_state *state,
             struct gl_context *ctx,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state,
                 char **name, size_t name_length, bool row_major)
{
   struct gl_uniform_storage *uniform = NULL;

   if (state->set_top_level_array &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member. */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types). */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }

      state->set_top_level_array = false;
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is
    * a composite type or an array where each element occupies more than one
    * location, then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         enter_record(state, ctx, type, row_major);

      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;
         bool field_row_major = row_major;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               if (prog->data->spirv) {
                  state->offset =
                     struct_base_offset + glsl_get_struct_field_offset(type, i);
               } else if (glsl_get_struct_field_offset(type, i) != -1 &&
                          type == state->current_ifc_type) {
                  state->offset = glsl_get_struct_field_offset(type, i);
               }

               if (glsl_type_is_interface(type))
                  state->set_top_level_array = true;
            }

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }

            /* The layout of structures at the top level of the block is set
             * during parsing. For matrices contained in multiple levels of
             * structures in the block, the inner structures have no layout.
             * These cases must potentially inherit the layout from the outer
             * levels.
             */
            const enum glsl_matrix_layout matrix_layout =
               glsl_get_struct_field_data(type, i)->matrix_layout;
            if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
               field_row_major = true;
            } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
               field_row_major = false;
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name. */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, i, location,
                                        state, name, new_length,
                                        field_row_major);

         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         leave_record(state, ctx, type, row_major);

      state->current_type = old_type;

      return location_count;
   } else {
      /* Create a new uniform storage entry. */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members. */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));

      uniform->name =
         name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      struct hash_entry *entry =
         _mesa_hash_table_search(state->referenced_uniforms,
                                 state->current_var);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE)
         uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location. */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);
      uniform->is_bindless = state->current_var->data.bindless;

      /* Set fields whose default value depends on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       * as an array of basic types, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not
       * declared as a matrix or array of matrices, zero is written to params.
       * For active variables not backed by a buffer object, -1 is written to
       * params, regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by a
       * buffer object, declared as a single matrix or array of matrices, and
       * stored in row-major order, one is written to params. For all other
       * active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }

         if (!prog->data->spirv) {
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);

            unsigned alignment =
               glsl_get_std140_base_alignment(type, uniform->row_major);
            if (packing == GLSL_INTERFACE_PACKING_STD430) {
               alignment =
                  glsl_get_std430_base_alignment(type, uniform->row_major);
            }
            state->offset = glsl_align(state->offset, alignment);
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index
       * by comparing the bindings; we cannot use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         if (!prog->data->spirv) {
            bool is_interface_array =
               glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
               glsl_type_is_array(state->current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state->current_var->interface_type);
            if (is_interface_array) {
               unsigned l = strlen(ifc_name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     buffer_block_index = i;
                     break;
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;
                     break;
                  }
               }
            }

            /* Compute the next offset. */
            bool use_std430 = ctx->Const.UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);
            if (packing == GLSL_INTERFACE_PACKING_STD430)
               state->offset += glsl_get_std430_size(type, uniform->row_major);
            else
               state->offset += glsl_get_std140_size(type, uniform->row_major);
         } else {
            for (unsigned i = 0; i < num_blocks; i++) {
               if (state->current_var->data.binding == blocks[i].Binding) {
                  buffer_block_index = i;
                  break;
               }
            }

            /* Compute the next offset. */
            state->offset += glsl_get_explicit_size(type, true);
         }
         assert(buffer_block_index >= 0);
      }

      uniform->block_index = buffer_block_index;
      uniform->builtin = is_gl_identifier(uniform->name);
      uniform->atomic_buffer_index = -1;

      /* The following fields are for features not supported by
       * ARB_gl_spirv.
       */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      update_uniforms_shader_info(prog, state, uniform, type, stage);

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      if (name) {
         _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
                                 (void *) (intptr_t)
                                 (prog->data->NumUniformStorage - 1));
      }

      if (!is_gl_identifier(uniform->name) && !uniform->is_shader_storage &&
          !state->var_is_in_block)
         state->num_values += values;

      return MAX2(uniform->array_elements, 1);
   }
}

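/* Main entry point: walks every linked stage, first collecting which
 * uniforms each stage actually references (add_var_use_shader) and then
 * creating or updating the UniformStorage entries for each uniform variable.
 * A minimal sketch of the expected call, assuming a freshly linked
 * gl_shader_program "prog" whose stages already carry NIR:
 *
 *    if (!gl_nir_link_uniforms(ctx, prog, true))
 *       return false;   // linker_error() has recorded the failure
 *
 * where fill_parameters selects whether the per-stage gl_program_parameter
 * lists are populated for the driver.
 */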
bool
gl_nir_link_uniforms(struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items. */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders. */
   struct nir_link_uniforms_state state = {0,};
   state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                                _mesa_key_string_equal);

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

      state.referenced_uniforms =
         _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                 _mesa_key_pointer_equal);
      state.next_bindless_image_index = 0;
      state.next_bindless_sampler_index = 0;
      state.next_image_index = 0;
      state.next_sampler_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_storage_blocks_write_access = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      add_var_use_shader(nir, state.referenced_uniforms);

      nir_foreach_variable(var, &nir->uniforms) {
         state.current_var = var;
         state.current_ifc_type = NULL;
         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.set_top_level_array = false;
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /*
          * From ARB_program_interface spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          * For example, in the following code:
          *
          * uniform Block1 {
          *    int member1;
          * };
          * uniform Block2 {
          *    int member2;
          * } instance2;
          * uniform Block3 {
          *    int member3;
          * } instance3[2]; // uses two separate buffer bindings
          *
          * the three uniforms (if active) are enumerated as "member1",
          * "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of ubo, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a ubo, or the variables of a ssbo, we need to treat
          * arrays of instance as a single block.
          */
         char *name;
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block &&
             ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
              (prog->data->spirv && type == var->interface_type))) {
            type = glsl_without_array(var->type);
            state.current_ifc_type = type;
            name = ralloc_strdup(NULL, glsl_get_type_name(type));
         } else {
            state.set_top_level_array = true;
            name = ralloc_strdup(NULL, var->name);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int location = var->data.location;

         struct gl_uniform_block *blocks;
         int num_blocks;
         int buffer_block_index = -1;
         if (!prog->data->spirv && state.var_is_in_block) {
            /* If the uniform is inside a uniform block determine its block
             * index by comparing the bindings; we cannot use names.
             */
            blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
            num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

            bool is_interface_array =
               glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
               glsl_type_is_array(state.current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state.current_var->interface_type);

            if (is_interface_array) {
               unsigned l = strlen(ifc_name);

               /* Even when a match is found, do not "break" here. As this
                * is an array of instances, all elements of the array need
                * to be marked as referenced.
                */
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].Name, l) == 0 &&
                      blocks[i].Name[l] == '[') {
                     if (buffer_block_index == -1)
                        buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry) {
                        BITSET_WORD *bits = (BITSET_WORD *) entry->data;
                        if (BITSET_TEST(bits, blocks[i].linearized_array_index))
                           blocks[i].stageref |= 1U << shader_type;
                     }
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].Name) == 0) {
                     buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }
            }

            if (nir_variable_is_in_ssbo(var) &&
                !(var->data.access & ACCESS_NON_WRITEABLE)) {
               unsigned array_size = is_interface_array ?
                  glsl_get_length(var->type) : 1;

               STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);

               /* Shaders that use too many SSBOs will fail to compile, which
                * we don't care about.
                *
                * This is true for shaders that do not use too many SSBOs:
                */
               if (buffer_block_index + array_size <= 32) {
                  state.shader_storage_blocks_write_access |=
                     u_bit_consecutive(buffer_block_index, array_size);
               }
            }
         }

         if (!prog->data->spirv && state.var_is_in_block &&
             glsl_without_array(state.current_var->type) != state.current_var->interface_type) {

            bool found = false;
            char sentinel = '\0';

            if (glsl_type_is_struct(state.current_var->type)) {
               sentinel = '.';
            } else if (glsl_type_is_array(state.current_var->type) &&
                       (glsl_type_is_array(glsl_get_array_element(state.current_var->type))
                        || glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
               sentinel = '[';
            }

            const unsigned l = strlen(state.current_var->name);
            for (unsigned i = 0; i < num_blocks; i++) {
               for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
                  if (sentinel) {
                     const char *begin = blocks[i].Uniforms[j].Name;
                     const char *end = strchr(begin, sentinel);

                     if (end == NULL)
                        continue;

                     if ((ptrdiff_t) l != (end - begin))
                        continue;

                     found = strncmp(state.current_var->name, begin, l) == 0;
                  } else {
                     found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
                  }

                  if (found) {
                     location = j;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms, var);
                     if (entry)
                        blocks[i].stageref |= 1U << shader_type;

                     break;
                  }
               }

               if (found)
                  break;
            }
            assert(found);

            const struct gl_uniform_block *const block =
               &blocks[buffer_block_index];
            assert(location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[location];

            state.offset = ubo_var->Offset;
            var->data.location = location;
         }

         /* Check if the uniform has already been processed for another
          * stage. If so, validate that they are compatible and update the
          * active stage mask.
          */
         if (find_and_update_previous_uniform_storage(ctx, prog, &state, var,
                                                      name, type, shader_type)) {
            ralloc_free(name);
            free_type_tree(type_tree);
            continue;
         }

         /* From now on the variable’s location will be its uniform index. */
         if (!state.var_is_in_block)
            var->data.location = prog->data->NumUniformStorage;
         else
            location = -1;

         bool row_major =
            var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
         int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
                                    0, location,
                                    &state,
                                    !prog->data->spirv ? &name : NULL,
                                    !prog->data->spirv ? strlen(name) : 0,
                                    row_major);

         free_type_tree(type_tree);
         ralloc_free(name);

         if (res == -1)
            return false;
      }

      _mesa_hash_table_destroy(state.referenced_uniforms, NULL);

      if (state.num_shader_samplers >
          ctx->Const.Program[shader_type].MaxTextureImageUnits) {
         linker_error(prog, "Too many %s shader texture samplers\n",
                      _mesa_shader_stage_to_string(shader_type));
         continue;
      }

      if (state.num_shader_images >
          ctx->Const.Program[shader_type].MaxImageUniforms) {
         linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
                      _mesa_shader_stage_to_string(shader_type),
                      state.num_shader_images,
                      ctx->Const.Program[shader_type].MaxImageUniforms);
         continue;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->Program->sh.ShaderStorageBlocksWriteAccess =
         state.shader_storage_blocks_write_access;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->data->NumUniformDataSlots = state.num_values;

   if (prog->data->spirv)
      prog->NumUniformRemapTable = state.max_uniform_location;

   nir_setup_uniform_remap_tables(ctx, prog);
   gl_nir_set_uniform_initializers(ctx, prog);

   _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);

   return true;
}