glsl: fix check for matrices in blocks when using nir uniform linker
[mesa.git] / src / compiler / glsl / gl_nir_link_uniforms.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "main/context.h"
#include "main/mtypes.h"

/* This file does the common linking of GLSL uniforms, using NIR instead of
 * IR as its counterpart glsl/link_uniforms.cpp does.
 *
 * Also note that this is tailored to the needs and particularities of
 * ARB_gl_spirv (such as the need to work/link without names available,
 * explicit locations being mandatory for normal uniforms, and so on).
 */

#define UNMAPPED_UNIFORM_LOC ~0u

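/* To illustrate the remap tables built below (example values only, not taken
 * from any real shader): a uniform "vec4 u[3]" with explicit location 2 gets
 * entries = 3 and num_slots = 4, so UniformRemapTable[2..4] all point at its
 * gl_uniform_storage, while its data occupies 3 * 4 consecutive
 * gl_constant_value slots starting at uniform->storage.
 */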
static void
nir_setup_uniform_remap_tables(struct gl_context *ctx,
                               struct gl_shader_program *prog)
{
   prog->UniformRemapTable = rzalloc_array(prog,
                                           struct gl_uniform_storage *,
                                           prog->NumUniformRemapTable);
   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformStorage,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for the rest of the uniforms. */
   link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* link_util_find_empty_block() returns -1 when no empty block is big
       * enough, so the location must be signed.
       */
      int location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* Resize the remap table to fit the new entries. */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* Set the base location in the remap table for the uniform. */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}
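
/* For example, a uniform referenced by both the vertex and fragment shaders
 * ends up with active_shader_mask ==
 * (1 << MESA_SHADER_VERTEX) | (1 << MESA_SHADER_FRAGMENT).
 */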

/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var. In general this is done using the explicit
 * location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   may be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped here. They will be handled and assigned a location
 *   later.
 */
static struct gl_uniform_storage *
find_and_update_previous_uniform_storage(struct gl_shader_program *prog,
                                         nir_variable *var,
                                         unsigned stage)
{
   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from UBOs and SSBOs. */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
            }
         }
      }

      return uniform;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without a
    * location (e.g. atomic counters) that have an initial location equal to
    * -1. We just return in that case. Those uniforms will be handled later.
    */
   if (var->data.location == -1)
      return NULL;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms. Would it be better to use some kind of hash?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         return &prog->data->UniformStorage[i];
      }
   }

   return NULL;
}

/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};
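
/* As an illustrative sketch (not taken from any real shader), a uniform
 * declared as
 *
 *    struct S { sampler2D tex; float f; } s[2];
 *
 * produces a tree whose root entry has array_size = 2 and a single child for
 * S, which in turn has two children, one per field. Both array elements are
 * walked against the same child entries, which is how opaque indices reserved
 * on the first visit get reused on later visits.
 */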

struct nir_link_uniforms_state {
   /* Per-whole-program state. */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;
   unsigned next_sampler_index;
   unsigned next_image_index;

   /* Per-shader-stage state. */
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   struct gl_program_parameter_list *params;

   /* Per-variable state. */
   nir_variable *current_var;
   int offset;
   bool var_is_in_block;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
};

static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index)
{
   /* If we’ve already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */
      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
   }

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}
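
/* A worked trace for the "struct S { sampler2D tex; float f; } s[2];" sketch
 * above, assuming *next_index starts at 0: visiting s[0].tex finds
 * next_index == UINT_MAX, reserves array_size = 1 * 1 * 2 = 2 indices, sets
 * the entry's next_index to 0 and returns 0; visiting s[1].tex reuses the
 * same entry and returns 1. The two samplers therefore land on consecutive
 * indices.
 */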

static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage ||
       glsl_contains_opaque(type))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each parameter will hold the index of its backing uniform storage.
    * This avoids relying on names to match parameters and uniform storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}
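
/* For example (illustrative only): for a hypothetical "dmat3 m[2]" uniform,
 * glsl_get_aoa_size() gives 2, glsl_get_matrix_columns() gives 3, and dvec3
 * columns are dual-slot, so add_parameter() above adds 2 * 3 * 2 = 12
 * parameters, each tagged with the index of its backing uniform storage.
 */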

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 const struct glsl_type *parent_type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state)
{
   struct gl_uniform_storage *uniform = NULL;

   if (parent_type == state->current_var->type &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top-level SSBO member. */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types). */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is
    * a composite type or an array where each element occupies more than one
    * location, then we need to process it recursively.
    */
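   /* For instance, "float v[4]" is handled below as a single
    * gl_uniform_storage entry with array_elements == 4, while a hypothetical
    * "S s[4]" (S being a struct) or "float v[4][2]" recurses once per
    * element/field.
    */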
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               state->offset =
                  struct_base_offset + glsl_get_struct_field_offset(type, i);
            }
         } else {
            field_type = glsl_get_array_element(type);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, type, i, location,
                                        state);
         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      state->current_type = old_type;

      return location_count;
   } else {
      /* Create a new uniform storage entry. */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members. */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));
      /* ARB_gl_spirv: names are considered optional debug info, so the linker
       * needs to work without them, and returning them is optional. For
       * simplicity we ignore names.
       */
      uniform->name = NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location. */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);

      /* Set fields whose default values depend on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       * as an array of basic types, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not
       * declared as a matrix or array of matrices, zero is written to params.
       * For active variables not backed by a buffer object, -1 is written to
       * params, regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by a
       * buffer object, declared as a single matrix or array of matrices, and
       * stored in row-major order, one is written to params. For all other
       * active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block, determine its block index
       * by comparing bindings; we can not use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks =
            nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         unsigned num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         for (unsigned i = 0; i < num_blocks; i++) {
            if (state->current_var->data.binding == blocks[i].Binding) {
               buffer_block_index = i;
               break;
            }
         }
         assert(buffer_block_index >= 0);

         /* Compute the next offset. */
         state->offset += glsl_get_explicit_size(type, true);
      }

      uniform->block_index = buffer_block_index;

      /* @FIXME: the initialization of the following will be done as we
       * implement support for their specific features, like SSBO, atomics,
       * etc.
       */
      uniform->builtin = false;
      uniform->atomic_buffer_index = -1;
      uniform->is_bindless = false;

      /* The following is for features not supported by ARB_gl_spirv. */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);

      if (glsl_type_is_sampler(type_no_array)) {
         int sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index);

         state->num_shader_samplers++;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);

         for (unsigned i = sampler_index;
              i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
              i++) {
            stage_program->sh.SamplerTargets[i] =
               glsl_get_sampler_target(type_no_array);
            state->shader_samplers_used |= 1U << i;
            state->shader_shadow_samplers |= shadow << i;
         }
      } else if (glsl_type_is_image(type_no_array)) {
         /* @FIXME: image_index should match that of the same image uniform
          * in other shaders. This means we need to match image uniforms by
          * location (GLSL does it by variable name, but we want to avoid
          * that).
          */
         int image_index = state->next_image_index;
         state->next_image_index += entries;

         state->num_shader_images++;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers. */
         enum gl_access_qualifier image_access =
            state->current_var->data.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            stage_program->sh.ImageAccess[i] = access;
         }
      }

      unsigned values = glsl_get_component_slots(type);
      state->num_shader_uniform_components += values;
      state->num_values += values;

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      return MAX2(uniform->array_elements, 1);
   }
}

bool
gl_nir_link_uniforms(struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items. */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders. */
   struct nir_link_uniforms_state state = {0,};

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      nir_foreach_variable(var, &nir->uniforms) {
         struct gl_uniform_storage *uniform = NULL;

         state.current_var = var;

         /* Check if the uniform has been processed already for another
          * stage. If so, validate that they are compatible and update the
          * active stage mask.
          */
         uniform = find_and_update_previous_uniform_storage(prog, var,
                                                            shader_type);
         if (uniform) {
            var->data.location = uniform - prog->data->UniformStorage;

            if (!nir_variable_is_in_block(var))
               add_parameter(uniform, ctx, prog, var->type, &state);

            continue;
         }

         int location = var->data.location;
         /* From now on the variable’s location will be its uniform index. */
         var->data.location = prog->data->NumUniformStorage;

         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /* From the ARB_program_interface_query spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          *  For example, in the following code:
          *
          *     uniform Block1 {
          *        int member1;
          *     };
          *     uniform Block2 {
          *        int member2;
          *     } instance2;
          *     uniform Block3 {
          *        int member3;
          *     } instance3[2]; // uses two separate buffer bindings
          *
          *  the three uniforms (if active) are enumerated as "member1",
          *  "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of UBOs, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a UBO, or the variables of an SSBO, we need to treat
          * arrays of instances as a single block.
          */
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block && glsl_type_is_array(type)) {
            type = glsl_without_array(type);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
                                    NULL, 0,
                                    location,
                                    &state);

         free_type_tree(type_tree);

         if (res == -1)
            return false;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->NumUniformRemapTable = state.max_uniform_location;
   prog->data->NumUniformDataSlots = state.num_values;

   nir_setup_uniform_remap_tables(ctx, prog);
   gl_nir_set_uniform_initializers(ctx, prog);

   return true;
}
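
/* A minimal sketch of the expected calling sequence (hypothetical caller
 * code; the real call sites live elsewhere in mesa's linking paths), with
 * prog->_LinkedShaders[] populated and each sh->Program->nir already built:
 *
 *    if (!gl_nir_link_uniforms(ctx, prog, true))
 *       return false;   // a linker_error() has already been recorded
 *
 * Passing true for fill_parameters also fills each stage's
 * sh->Program->Parameters list.
 */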