glsl: reset next_image_index count for each shader stage
[mesa.git] / src / compiler / glsl / gl_nir_link_uniforms.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "main/context.h"
#include "main/mtypes.h"

/* This file does the common linking of GLSL uniforms using NIR instead of
 * IR, with glsl/link_uniforms.cpp being its IR counterpart.
 *
 * Also note that this is tailored to the needs and particularities of
 * ARB_gl_spirv (like the need to work/link without names available,
 * mandatory explicit locations for normal uniforms, and so on).
 */

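/* Marks a uniform that has not (yet) been assigned an entry in the
 * UniformRemapTable.
 */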
#define UNMAPPED_UNIFORM_LOC ~0u

static void
nir_setup_uniform_remap_tables(struct gl_context *ctx,
                               struct gl_shader_program *prog)
{
   prog->UniformRemapTable = rzalloc_array(prog,
                                           struct gl_uniform_storage *,
                                           prog->NumUniformRemapTable);
   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

   /* Reserve all the explicit locations of the active uniforms. */
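   /* (For example, a hypothetical "layout(location = 3) uniform vec4 v[2];"
    * takes two entries, making UniformRemapTable[3] and [4] both point at
    * its single gl_uniform_storage entry.)
    */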
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for the rest of the uniforms. */
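   /* (link_util_update_empty_uniform_locations() records the gaps that the
    * explicit locations above left in the remap table, so that implicitly
    * located uniforms can reuse them via link_util_find_empty_block().)
    */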
   link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->is_shader_storage)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      unsigned location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1 || location + entries >= prog->NumUniformRemapTable) {
         unsigned new_entries = entries;
         if (location == -1)
            location = prog->NumUniformRemapTable;
         else
            new_entries = location - prog->NumUniformRemapTable + entries;

         /* resize remap table to fit new entries */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + new_entries);
         prog->NumUniformRemapTable += new_entries;
      }

      /* set the base location in remap table for the uniform */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}

/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var. In general this is done using the explicit
 * location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   can be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped. They will be handled and assigned a location later.
 */
static struct gl_uniform_storage *
find_and_update_previous_uniform_storage(struct gl_shader_program *prog,
                                         nir_variable *var,
                                         unsigned stage)
{
   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from UBOs and SSBOs */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
            }
         }
      }

      return uniform;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without a
    * location (e.g. atomic counters) that would have an initial location
    * equal to -1. We just return in that case. Those uniforms will be
    * handled later.
    */
   if (var->data.location == -1)
      return NULL;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms. Would it be better to use some kind of hash table?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         return &prog->data->UniformStorage[i];
      }
   }

   return NULL;
}

/* Used to build a tree representing the glsl_type so that we can have a
 * place to store the next index for opaque types. Array types are expanded
 * so that they have a single child which is used for all elements of the
 * array. Struct types have a child for each member. The tree is walked
 * while processing a uniform so that we can recognise when an opaque type
 * is encountered a second time in order to reuse the same range of indices
 * that was reserved the first time. That way the sampler indices can be
 * arranged so that members of an array are placed sequentially even if the
 * array is an array of structs containing other opaque members.
 */
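/* For example, a hypothetical
 *
 *    struct S { sampler2D tex; float f; };
 *    uniform S u[2];
 *
 * produces a root entry for the array (array_size == 2) with a single child
 * for S, which in turn has one child per member. Because both array
 * elements share the struct's entries, u[0].tex and u[1].tex take
 * consecutive indices from the range reserved on the first visit.
 */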
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven't
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};

struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;
   unsigned next_sampler_index;

   /* per-shader stage */
   unsigned next_image_index;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   int offset;
   bool var_is_in_block;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
};

static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

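/* Returns the base sampler/image index for the member described by
 * state->current_type, reserving a contiguous range of indices the first
 * time the member is visited. The reservation covers the product of the
 * sizes of all enclosing arrays, so later visits for the remaining array
 * elements hand out consecutive indices from the same range.
 */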
static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index)
{
   /* If we've already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
   }

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}

static void
add_parameter(struct gl_uniform_storage *uniform,
              struct gl_context *ctx,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   if (!state->params || uniform->is_shader_storage || glsl_contains_opaque(type))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;
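   /* (One parameter per vector: e.g. a hypothetical "uniform dmat3 m[2];"
    * gives 2 array elements * 3 columns, doubled because each dvec3 column
    * is dual-slot, for a total of 12 parameters.)
    */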

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params);

   if (ctx->Const.PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, NULL, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(struct gl_context *ctx,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 const struct glsl_type *parent_type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state)
{
   struct gl_uniform_storage *uniform = NULL;

   if (parent_type == state->current_var->type &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types) */
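         /* (e.g. a hypothetical "S s[4];" member of an SSBO, with S a
          * struct: size 4, stride taken from the explicit layout) */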
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is
    * a composite type or an array where each element occupies more than one
    * location, we need to process it recursively.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               state->offset =
                  struct_base_offset + glsl_get_struct_field_offset(type, i);
            }
         } else {
            field_type = glsl_get_array_element(type);
         }

         int entries = nir_link_uniform(ctx, prog, stage_program, stage,
                                        field_type, type, i, location,
                                        state);
         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      state->current_type = old_type;

      return location_count;
   } else {
      /* Create a new uniform storage entry */
      prog->data->UniformStorage =
         reralloc(prog->data,
                  prog->data->UniformStorage,
                  struct gl_uniform_storage,
                  prog->data->NumUniformStorage + 1);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory during linking.\n");
         return -1;
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));
      /* ARB_gl_spirv: names are considered optional debug info, so the
       * linker needs to work without them, and returning them is optional.
       * For simplicity we ignore names.
       */
      uniform->name = NULL;

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);

      /* Set fields whose default values depend on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not
       * declared as an array of basic types, zero is written to params. For
       * active variables not backed by a buffer object, -1 is written to
       * params, regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not
       * declared as a matrix or array of matrices, zero is written to
       * params. For active variables not backed by a buffer object, -1 is
       * written to params, regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by
       * a buffer object, declared as a single matrix or array of matrices,
       * and stored in row-major order, one is written to params. For all
       * other active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a block, determine its block index by
       * comparing the bindings; we cannot use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         for (unsigned i = 0; i < num_blocks; i++) {
            if (state->current_var->data.binding == blocks[i].Binding) {
               buffer_block_index = i;
               break;
            }
         }
         assert(buffer_block_index >= 0);

         /* Compute the next offset. */
         state->offset += glsl_get_explicit_size(type, true);
      }

      uniform->block_index = buffer_block_index;

      /* @FIXME: the initialization of the following will be done as we
       * implement support for their specific features, like SSBO, atomics,
       * etc.
       */
      uniform->builtin = false;
      uniform->atomic_buffer_index = -1;
      uniform->is_bindless = false;

      /* The following are for features not supported by ARB_gl_spirv. */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      if (glsl_type_is_sampler(type_no_array)) {
         int sampler_index =
            get_next_index(state, uniform, &state->next_sampler_index);

         /* Samplers (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = sampler_index;

         const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);

         for (unsigned i = sampler_index;
              i < MIN2(state->next_sampler_index, MAX_SAMPLERS);
              i++) {
            stage_program->sh.SamplerTargets[i] =
               glsl_get_sampler_target(type_no_array);
            state->shader_samplers_used |= 1U << i;
            state->shader_shadow_samplers |= shadow << i;
         }

         state->num_values += values;
      } else if (glsl_type_is_image(type_no_array)) {
         /* @FIXME: image_index should match that of the same image
          * uniform in other shaders. This means we need to match image
          * uniforms by location (GLSL does it by variable name, but we
          * want to avoid that).
          */
         int image_index = state->next_image_index;
         state->next_image_index += entries;

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         uniform->opaque[stage].active = true;
         uniform->opaque[stage].index = image_index;

         /* Set image access qualifiers */
         enum gl_access_qualifier image_access =
            state->current_var->data.access;
         const GLenum access =
            (image_access & ACCESS_NON_WRITEABLE) ?
            ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                    GL_READ_ONLY) :
            ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                    GL_READ_WRITE);
         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS);
              i++) {
            stage_program->sh.ImageAccess[i] = access;
         }

         if (!uniform->is_shader_storage) {
            state->num_shader_uniform_components += values;
            state->num_values += values;
         }
      } else {
         if (!state->var_is_in_block) {
            state->num_shader_uniform_components += values;
            state->num_values += values;
         }
      }

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, ctx, prog, type, state);

      return MAX2(uniform->array_elements, 1);
   }
}

bool
gl_nir_link_uniforms(struct gl_context *ctx,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders */
   struct nir_link_uniforms_state state = {0,};

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

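      /* The fields below are per-shader-stage state (see
       * nir_link_uniforms_state), so they are reset for every stage; in
       * particular image uniform indices restart at 0 on each stage, while
       * next_sampler_index keeps counting across the whole program.
       */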
      state.next_image_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      nir_foreach_variable(var, &nir->uniforms) {
         struct gl_uniform_storage *uniform = NULL;

         state.current_var = var;

         /* Check if the uniform has already been processed for another
          * stage. If so, validate that they are compatible and update the
          * active stage mask.
          */
         uniform = find_and_update_previous_uniform_storage(prog, var, shader_type);
         if (uniform) {
            var->data.location = uniform - prog->data->UniformStorage;

            if (!state.var_is_in_block)
               add_parameter(uniform, ctx, prog, var->type, &state);

            continue;
         }

         int location = var->data.location;
         /* From now on the variable's location will be its uniform index */
         var->data.location = prog->data->NumUniformStorage;

         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /*
          * From the ARB_program_interface_query spec, issue (16):
          *
          *    "RESOLVED: We will follow the default rule for enumerating
          *     block members in the OpenGL API, which is:
          *
          *     * If a variable is a member of an interface block without an
          *       instance name, it is enumerated using just the variable
          *       name.
          *
          *     * If a variable is a member of an interface block with an
          *       instance name, it is enumerated as "BlockName.Member",
          *       where "BlockName" is the name of the interface block (not
          *       the instance name) and "Member" is the name of the
          *       variable.
          *
          *     For example, in the following code:
          *
          *     uniform Block1 {
          *        int member1;
          *     };
          *     uniform Block2 {
          *        int member2;
          *     } instance2;
          *     uniform Block3 {
          *        int member3;
          *     } instance3[2]; // uses two separate buffer bindings
          *
          *     the three uniforms (if active) are enumerated as "member1",
          *     "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of UBOs, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a UBO, or the variables of an SSBO, we need to treat
          * arrays of instances as a single block.
          */
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block && glsl_type_is_array(type)) {
            type = glsl_without_array(type);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int res = nir_link_uniform(ctx, prog, sh->Program, shader_type, type,
                                    NULL, 0,
                                    location,
                                    &state);

         free_type_tree(type_tree);

         if (res == -1)
            return false;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->NumUniformRemapTable = state.max_uniform_location;
   prog->data->NumUniformDataSlots = state.num_values;

   nir_setup_uniform_remap_tables(ctx, prog);
   gl_nir_set_uniform_initializers(ctx, prog);

   return true;
}