glsl/nir: Fill in the Parameters in NIR linker
[mesa.git] / src / compiler / glsl / gl_nir_link_uniforms.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "gl_nir_linker.h"
26 #include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
27 #include "linker_util.h"
28 #include "main/context.h"
29 #include "main/mtypes.h"
30
/* This file does the common linking of GLSL uniforms, using NIR instead of
 * IR as its counterpart glsl/link_uniforms.cpp does.
 *
 * Also note that this is tailored to the needs and particularities of
 * ARB_gl_spirv (like the need to work/link without names available,
 * explicit locations being mandatory for normal uniforms, and so on).
 */
38
39 #define UNMAPPED_UNIFORM_LOC ~0u
40
41 static void
42 nir_setup_uniform_remap_tables(struct gl_context *ctx,
43 struct gl_shader_program *prog)
44 {
45 prog->UniformRemapTable = rzalloc_array(prog,
46 struct gl_uniform_storage *,
47 prog->NumUniformRemapTable);
48 union gl_constant_value *data =
49 rzalloc_array(prog->data,
50 union gl_constant_value, prog->data->NumUniformDataSlots);
51 if (!prog->UniformRemapTable || !data) {
52 linker_error(prog, "Out of memory during linking.\n");
53 return;
54 }
55 prog->data->UniformDataSlots = data;
56
57 prog->data->UniformDataDefaults =
58 rzalloc_array(prog->data->UniformStorage,
59 union gl_constant_value, prog->data->NumUniformDataSlots);
60
61 unsigned data_pos = 0;
62
63 /* Reserve all the explicit locations of the active uniforms. */
64 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
65 struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
66
67 if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
68 continue;
69
70 /* How many new entries for this uniform? */
71 const unsigned entries = MAX2(1, uniform->array_elements);
72 unsigned num_slots = glsl_get_component_slots(uniform->type);
73
74 uniform->storage = &data[data_pos];
75
76 /* Set remap table entries point to correct gl_uniform_storage. */
77 for (unsigned j = 0; j < entries; j++) {
78 unsigned element_loc = uniform->remap_location + j;
79 prog->UniformRemapTable[element_loc] = uniform;
80
81 data_pos += num_slots;
82 }
83 }
84
85 /* Reserve locations for rest of the uniforms. */
86 link_util_update_empty_uniform_locations(prog);
87
88 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
89 struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
90
91 if (uniform->is_shader_storage)
92 continue;
93
94 /* Built-in uniforms should not get any location. */
95 if (uniform->builtin)
96 continue;
97
98 /* Explicit ones have been set already. */
99 if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
100 continue;
101
102 /* How many entries for this uniform? */
103 const unsigned entries = MAX2(1, uniform->array_elements);
104
105 unsigned location =
106 link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);
107
108 if (location == -1) {
109 location = prog->NumUniformRemapTable;
110
111 /* resize remap table to fit new entries */
112 prog->UniformRemapTable =
113 reralloc(prog,
114 prog->UniformRemapTable,
115 struct gl_uniform_storage *,
116 prog->NumUniformRemapTable + entries);
117 prog->NumUniformRemapTable += entries;
118 }
119
120 /* set the base location in remap table for the uniform */
121 uniform->remap_location = location;
122
123 unsigned num_slots = glsl_get_component_slots(uniform->type);
124
125 uniform->storage = &data[data_pos];
126
127 /* Set remap table entries point to correct gl_uniform_storage. */
128 for (unsigned j = 0; j < entries; j++) {
129 unsigned element_loc = uniform->remap_location + j;
130 prog->UniformRemapTable[element_loc] = uniform;
131
132 data_pos += num_slots;
133 }
134 }
135 }
136
137 static void
138 mark_stage_as_active(struct gl_uniform_storage *uniform,
139 unsigned stage)
140 {
141 uniform->active_shader_mask |= 1 << stage;
142 }
143
/**
 * Finds, returns, and updates the stage info for any uniform in UniformStorage
 * defined by @var. In general this is done using the explicit location,
 * except:
 *
 * * UBOs/SSBOs: as they lack explicit location, binding is used to locate
 *   them. That means that more than one entry at the uniform storage can be
 *   found. In that case all of them are updated, and the first entry is
 *   returned, in order to update the location of the nir variable.
 *
 * * Special uniforms: like atomic counters. They lack an explicit location,
 *   so they are skipped. They will be handled and assigned a location later.
 *
 * Returns NULL when the variable has not been processed by a previous stage.
 */
static struct gl_uniform_storage *
find_and_update_previous_uniform_storage(struct gl_shader_program *prog,
                                         nir_variable *var,
                                         unsigned stage)
{
   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      /* Pick the block list matching the variable's interface (UBO vs SSBO) */
      unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            /* Blocks are matched by binding because names are optional
             * debug info under ARB_gl_spirv.
             */
            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
            }
         }
      }

      return uniform;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without
    * location (ie: atomic counters) that would have an initial location equal
    * to -1. We just return on that case. Those uniforms will be handled
    * later.
    */
   if (var->data.location == -1)
      return NULL;

   /* TODO: following search can be problematic with shaders with a lot of
    * uniforms. Would it be better to use some type of hash
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         return &prog->data->UniformStorage[i];
      }
   }

   return NULL;
}
216
/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven't
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   /* Number of array elements (1 for non-array types) */
   unsigned array_size;
   /* Enclosing type's node, or NULL at the root */
   struct type_tree_entry *parent;
   /* Next struct member at the same level */
   struct type_tree_entry *next_sibling;
   /* First member (structs) or shared element node (arrays) */
   struct type_tree_entry *children;
};
237
/* Scratch state threaded through the uniform-linking walk. */
struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;              /* total component slots needed */
   unsigned max_uniform_location;    /* one past the highest mapped location */
   unsigned next_sampler_index;      /* next free sampler unit index */
   unsigned next_image_index;        /* next free image unit index */

   /* per-shader stage */
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;    /* bitmask of sampler units used */
   unsigned shader_shadow_samplers;  /* bitmask of shadow sampler units */
   /* Parameter list to fill in, or NULL when parameters are not wanted */
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   int offset;                       /* byte offset inside the current block */
   bool var_is_in_block;             /* variable is backed by a UBO/SSBO */
   int top_level_array_size;         /* TOP_LEVEL_ARRAY_SIZE for SSBO members */
   int top_level_array_stride;       /* TOP_LEVEL_ARRAY_STRIDE for SSBO members */
   /* Index of the first UniformStorage entry created for the current
    * variable, or -1 before any entry is created.
    */
   int main_uniform_storage_index;

   struct type_tree_entry *current_type;
};
264
265 static struct type_tree_entry *
266 build_type_tree_for_type(const struct glsl_type *type)
267 {
268 struct type_tree_entry *entry = malloc(sizeof *entry);
269
270 entry->array_size = 1;
271 entry->next_index = UINT_MAX;
272 entry->children = NULL;
273 entry->next_sibling = NULL;
274 entry->parent = NULL;
275
276 if (glsl_type_is_array(type)) {
277 entry->array_size = glsl_get_length(type);
278 entry->children = build_type_tree_for_type(glsl_get_array_element(type));
279 entry->children->parent = entry;
280 } else if (glsl_type_is_struct_or_ifc(type)) {
281 struct type_tree_entry *last = NULL;
282
283 for (unsigned i = 0; i < glsl_get_length(type); i++) {
284 const struct glsl_type *field_type = glsl_get_struct_field(type, i);
285 struct type_tree_entry *field_entry =
286 build_type_tree_for_type(field_type);
287
288 if (last == NULL)
289 entry->children = field_entry;
290 else
291 last->next_sibling = field_entry;
292
293 field_entry->parent = entry;
294
295 last = field_entry;
296 }
297 }
298
299 return entry;
300 }
301
302 static void
303 free_type_tree(struct type_tree_entry *entry)
304 {
305 struct type_tree_entry *p, *next;
306
307 for (p = entry->children; p; p = next) {
308 next = p->next_sibling;
309 free_type_tree(p);
310 }
311
312 free(entry);
313 }
314
315 static unsigned
316 get_next_index(struct nir_link_uniforms_state *state,
317 const struct gl_uniform_storage *uniform,
318 unsigned *next_index)
319 {
320 /* If we’ve already calculated an index for this member then we can just
321 * offset from there.
322 */
323 if (state->current_type->next_index == UINT_MAX) {
324 /* Otherwise we need to reserve enough indices for all of the arrays
325 * enclosing this member.
326 */
327
328 unsigned array_size = 1;
329
330 for (const struct type_tree_entry *p = state->current_type;
331 p;
332 p = p->parent) {
333 array_size *= p->array_size;
334 }
335
336 state->current_type->next_index = *next_index;
337 *next_index += array_size;
338 }
339
340 unsigned index = state->current_type->next_index;
341
342 state->current_type->next_index += MAX2(1, uniform->array_elements);
343
344 return index;
345 }
346
/* Append the gl_program_parameter entries backing @uniform to the current
 * stage's parameter list (state->params).
 *
 * One parameter is added per array-of-arrays element and per matrix column,
 * doubled for dual-slot (64-bit) types. Nothing is added for shader storage
 * variables, types containing opaques, or when no parameter list is being
 * filled (state->params == NULL).
 */
static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage || glsl_contains_opaque(type))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      /* Packed storage: size each parameter to its real component count,
       * with 64-bit types counting two slots per component.
       */
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            /* Dual-slot values are split into a full vec4 (even entries)
             * followed by the remaining components (odd entries).
             */
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      /* Unpacked storage: every parameter occupies a padded vec4. */
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->main_uniform_storage_index;
   }
}
400
/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 *
 * Structs and arrays of aggregates are processed recursively; every leaf
 * type produces one gl_uniform_storage entry (arrays of basic types are
 * folded into a single entry via array_elements).
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 const struct glsl_type *parent_type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state)
{
   struct gl_uniform_storage *uniform = NULL;

   /* Top-level SSBO members need TOP_LEVEL_ARRAY_SIZE/STRIDE recorded for
    * program interface queries.
    */
   if (parent_type == state->current_var->type &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types) */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               state->offset =
                  struct_base_offset + glsl_get_struct_field_offset(type, i);
            }
         } else {
            field_type = glsl_get_array_element(type);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, type, i, location,
                                        state);
         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         /* Struct members each have their own type-tree node; array
          * elements all share the single child node.
          */
         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      state->current_type = old_type;

      return location_count;
   } else {
      /* Create a new uniform storage entry */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      /* Remember the first storage entry created for the current variable;
       * parameters added later reference it.
       */
      if (state->main_uniform_storage_index == -1)
         state->main_uniform_storage_index = prog->data->NumUniformStorage;

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));
      /* ARB_gl_spirv: names are considered optional debug info, so the linker
       * needs to work without them, and returning them is optional. For
       * simplicity we ignore names.
       */
      uniform->name = NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);

      /* Set fields whose default value depend on the variable being inside a
       * block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       * as an array of basic types, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not declared
       * as a matrix or array of matrices, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * For the property IS_ROW_MAJOR, ... For active variables backed by a
       * buffer object, declared as a single matrix or array of matrices, and
       * stored in row-major order, one is written to params. For all other
       * active variables, zero is written to params.
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(type)) {
            assert(parent_type);
            uniform->matrix_stride = glsl_get_explicit_stride(type);

            uniform->row_major = glsl_matrix_type_is_row_major(type);
         } else {
            uniform->matrix_stride = 0;
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index by
       * comparing the bindings, we can not use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         for (unsigned i = 0; i < num_blocks; i++) {
            if (state->current_var->data.binding == blocks[i].Binding) {
               buffer_block_index = i;
               break;
            }
         }
         assert(buffer_block_index >= 0);

         /* Compute the next offset. */
         state->offset += glsl_get_explicit_size(type, true);
      }

      uniform->block_index = buffer_block_index;

      /* @FIXME: the initialization of the following will be done as we
       * implement support for their specific features, like SSBO, atomics,
       * etc.
       */
      uniform->builtin = false;
      uniform->atomic_buffer_index = -1;
      uniform->is_bindless = false;

      /* The following fields are for features not supported by ARB_gl_spirv */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);

      if (glsl_type_is_sampler(type_no_array)) {
         /* Reserve sampler units, reusing the range already assigned to the
          * same type-tree node if this member was seen before.
          */
         int sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index);

         state->num_shader_samplers++;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);

         for (unsigned i = sampler_index;
              i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
              i++) {
            stage_program->sh.SamplerTargets[i] =
               glsl_get_sampler_target(type_no_array);
            state->shader_samplers_used |= 1U << i;
            state->shader_shadow_samplers |= shadow << i;
         }
      } else if (glsl_type_is_image(type_no_array)) {
         /* @FIXME: image_index should match that of the same image
          * uniform in other shaders. This means we need to match image
          * uniforms by location (GLSL does it by variable name, but we
          * want to avoid that).
          */
         int image_index = state->next_image_index;
         state->next_image_index += entries;

         state->num_shader_images++;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers */
         enum gl_access_qualifier image_access =
            state->current_var->data.image.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            stage_program->sh.ImageAccess[i] = access;
         }
      }

      /* Account for the component slots this uniform needs in the backing
       * storage.
       */
      unsigned values = glsl_get_component_slots(type);
      state->num_shader_uniform_components += values;
      state->num_values += values;

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      return MAX2(uniform->array_elements, 1);
   }
}
674
675 bool
676 gl_nir_link_uniforms(struct gl_context *ctx,
677 struct gl_shader_program *prog,
678 bool fill_parameters)
679 {
680 /* First free up any previous UniformStorage items */
681 ralloc_free(prog->data->UniformStorage);
682 prog->data->UniformStorage = NULL;
683 prog->data->NumUniformStorage = 0;
684
685 /* Iterate through all linked shaders */
686 struct nir_link_uniforms_state state = {0,};
687
688 for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
689 struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
690 if (!sh)
691 continue;
692
693 nir_shader *nir = sh->Program->nir;
694 assert(nir);
695
696 state.num_shader_samplers = 0;
697 state.num_shader_images = 0;
698 state.num_shader_uniform_components = 0;
699 state.shader_samplers_used = 0;
700 state.shader_shadow_samplers = 0;
701 state.params = fill_parameters ? sh->Program->Parameters : NULL;
702
703 nir_foreach_variable(var, &nir->uniforms) {
704 struct gl_uniform_storage *uniform = NULL;
705
706 /* Check if the uniform has been processed already for
707 * other stage. If so, validate they are compatible and update
708 * the active stage mask.
709 */
710 uniform = find_and_update_previous_uniform_storage(prog, var, shader_type);
711 if (uniform) {
712 var->data.location = uniform - prog->data->UniformStorage;
713
714 if (!state.var_is_in_block)
715 add_parameter(uniform, ctx, prog, var->type, &state);
716
717 continue;
718 }
719
720 int location = var->data.location;
721 /* From now on the variable’s location will be its uniform index */
722 var->data.location = prog->data->NumUniformStorage;
723
724 state.current_var = var;
725 state.offset = 0;
726 state.var_is_in_block = nir_variable_is_in_block(var);
727 state.top_level_array_size = 0;
728 state.top_level_array_stride = 0;
729 state.main_uniform_storage_index = -1;
730
731 /*
732 * From ARB_program_interface spec, issue (16):
733 *
734 * "RESOLVED: We will follow the default rule for enumerating block
735 * members in the OpenGL API, which is:
736 *
737 * * If a variable is a member of an interface block without an
738 * instance name, it is enumerated using just the variable name.
739 *
740 * * If a variable is a member of an interface block with an
741 * instance name, it is enumerated as "BlockName.Member", where
742 * "BlockName" is the name of the interface block (not the
743 * instance name) and "Member" is the name of the variable.
744 *
745 * For example, in the following code:
746 *
747 * uniform Block1 {
748 * int member1;
749 * };
750 * uniform Block2 {
751 * int member2;
752 * } instance2;
753 * uniform Block3 {
754 * int member3;
755 * } instance3[2]; // uses two separate buffer bindings
756 *
757 * the three uniforms (if active) are enumerated as "member1",
758 * "Block2.member2", and "Block3.member3"."
759 *
760 * Note that in the last example, with an array of ubo, only one
761 * uniform is generated. For that reason, while unrolling the
762 * uniforms of a ubo, or the variables of a ssbo, we need to treat
763 * arrays of instance as a single block.
764 */
765 const struct glsl_type *type = var->type;
766 if (state.var_is_in_block && glsl_type_is_array(type)) {
767 type = glsl_without_array(type);
768 }
769
770 struct type_tree_entry *type_tree =
771 build_type_tree_for_type(type);
772 state.current_type = type_tree;
773
774 int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
775 NULL, 0,
776 location,
777 &state);
778
779 free_type_tree(type_tree);
780
781 if (res == -1)
782 return false;
783 }
784
785 sh->Program->SamplersUsed = state.shader_samplers_used;
786 sh->shadow_samplers = state.shader_shadow_samplers;
787 sh->Program->info.num_textures = state.num_shader_samplers;
788 sh->Program->info.num_images = state.num_shader_images;
789 sh->num_uniform_components = state.num_shader_uniform_components;
790 sh->num_combined_uniform_components = sh->num_uniform_components;
791 }
792
793 prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
794 prog->NumUniformRemapTable = state.max_uniform_location;
795 prog->data->NumUniformDataSlots = state.num_values;
796
797 nir_setup_uniform_remap_tables(ctx, prog);
798 gl_nir_set_uniform_initializers(ctx, prog);
799
800 return true;
801 }