1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program.h"
30 #include "util/string_to_uint_map.h"
31 #include "ir_array_refcount.h"
32
33 /**
34 * \file link_uniforms.cpp
35 * Assign locations for GLSL uniforms.
36 *
37 * \author Ian Romanick <ian.d.romanick@intel.com>
38 */
39
40 /**
41 * Used by the linker to indicate uniforms that have no location set.
42 */
43 #define UNMAPPED_UNIFORM_LOC ~0u
44
45 void
46 program_resource_visitor::process(const glsl_type *type, const char *name)
47 {
48 assert(type->without_array()->is_record()
49 || type->without_array()->is_interface());
50
51 unsigned record_array_count = 1;
52 char *name_copy = ralloc_strdup(NULL, name);
53 enum glsl_interface_packing packing = type->get_interface_packing();
54
55 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
56 record_array_count, NULL);
57 ralloc_free(name_copy);
58 }
59
60 void
61 program_resource_visitor::process(ir_variable *var)
62 {
63 unsigned record_array_count = 1;
64 const bool row_major =
65 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
66
67 const enum glsl_interface_packing packing = var->get_interface_type() ?
68 var->get_interface_type_packing() :
69 var->type->get_interface_packing();
70
71 const glsl_type *t =
72 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
73 const glsl_type *t_without_array = t->without_array();
74
75 /* false is always passed for the row_major parameter to the other
76 * processing functions because no information is available to do
77 * otherwise. See the warning in linker.h.
78 */
79 if (t_without_array->is_record() ||
80 (t->is_array() && t->fields.array->is_array())) {
81 char *name = ralloc_strdup(NULL, var->name);
82 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
83 false, record_array_count, NULL);
84 ralloc_free(name);
85 } else if (t_without_array->is_interface()) {
86 char *name = ralloc_strdup(NULL, t_without_array->name);
87 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
88 &t_without_array->
89 fields.structure[t_without_array->field_index(var->name)] : NULL;
90
91 recursion(t, &name, strlen(name), row_major, NULL, packing,
92 false, record_array_count, ifc_member);
93 ralloc_free(name);
94 } else {
95 this->set_record_array_count(record_array_count);
96 this->visit_field(t, var->name, row_major, NULL, packing, false);
97 }
98 }
99
100 void
101 program_resource_visitor::recursion(const glsl_type *t, char **name,
102 size_t name_length, bool row_major,
103 const glsl_type *record_type,
104 const enum glsl_interface_packing packing,
105 bool last_field,
106 unsigned record_array_count,
107 const glsl_struct_field *named_ifc_member)
108 {
109 /* Records need to have each field processed individually.
110 *
111 * Arrays of records need to have each array element processed
112 * individually, then each field of the resulting array elements processed
113 * individually.
114 */
115 if (t->is_interface() && named_ifc_member) {
116 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
117 named_ifc_member->name);
118 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
119 packing, false, record_array_count, NULL);
120 } else if (t->is_record() || t->is_interface()) {
121 if (record_type == NULL && t->is_record())
122 record_type = t;
123
124 if (t->is_record())
125 this->enter_record(t, *name, row_major, packing);
126
127 for (unsigned i = 0; i < t->length; i++) {
128 const char *field = t->fields.structure[i].name;
129 size_t new_length = name_length;
130
131 if (t->fields.structure[i].type->is_record())
132 this->visit_field(&t->fields.structure[i]);
133
134 if (t->is_interface() && t->fields.structure[i].offset != -1)
135 this->set_buffer_offset(t->fields.structure[i].offset);
136
137 /* Append '.field' to the current variable name. */
138 if (name_length == 0) {
139 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
140 } else {
141 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
142 }
143
144 /* The layout of structures at the top level of the block is set
145 * during parsing. For matrices contained in multiple levels of
146 * structures in the block, the inner structures have no layout.
147 * These cases must potentially inherit the layout from the outer
148 * levels.
149 */
150 bool field_row_major = row_major;
151 const enum glsl_matrix_layout matrix_layout =
152 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
153 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
154 field_row_major = true;
155 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
156 field_row_major = false;
157 }
158
159 recursion(t->fields.structure[i].type, name, new_length,
160 field_row_major,
161 record_type,
162 packing,
163 (i + 1) == t->length, record_array_count, NULL);
164
165 /* Only the first leaf field of the record is visited with the
166 * record type pointer.
167 */
168 record_type = NULL;
169 }
170
171 if (t->is_record()) {
172 (*name)[name_length] = '\0';
173 this->leave_record(t, *name, row_major, packing);
174 }
175 } else if (t->without_array()->is_record() ||
176 t->without_array()->is_interface() ||
177 (t->is_array() && t->fields.array->is_array())) {
178 if (record_type == NULL && t->fields.array->is_record())
179 record_type = t->fields.array;
180
181 unsigned length = t->length;
182
183 /* Shader storage block unsized arrays: add subscript [0] to variable
184 * names.
185 */
186 if (t->is_unsized_array())
187 length = 1;
188
189 record_array_count *= length;
190
191 for (unsigned i = 0; i < length; i++) {
192 size_t new_length = name_length;
193
194 /* Append the subscript to the current variable name */
195 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
196
197 recursion(t->fields.array, name, new_length, row_major,
198 record_type,
199 packing,
200 (i + 1) == t->length, record_array_count,
201 named_ifc_member);
202
203 /* Only the first leaf field of the record is visited with the
204 * record type pointer.
205 */
206 record_type = NULL;
207 }
208 } else {
209 this->set_record_array_count(record_array_count);
210 this->visit_field(t, *name, row_major, record_type, packing, last_field);
211 }
212 }
213
214 void
215 program_resource_visitor::visit_field(const glsl_struct_field *)
216 {
217 }
218
219 void
220 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
221 const enum glsl_interface_packing)
222 {
223 }
224
225 void
226 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
227 const enum glsl_interface_packing)
228 {
229 }
230
231 void
232 program_resource_visitor::set_buffer_offset(unsigned)
233 {
234 }
235
236 void
237 program_resource_visitor::set_record_array_count(unsigned)
238 {
239 }
240
241 namespace {
242
243 /**
244 * Class to help calculate the storage requirements for a set of uniforms
245 *
246 * As uniforms are added to the active set the number of active uniforms and
247 * the storage requirements for those uniforms are accumulated. The active
248 * uniforms are added to the hash table supplied to the constructor.
249 *
250 * If the same uniform is added multiple times (i.e., once for each shader
251 * target), it will only be counted once.
252 */
253 class count_uniform_size : public program_resource_visitor {
254 public:
255 count_uniform_size(struct string_to_uint_map *map,
256 struct string_to_uint_map *hidden_map)
257 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
258 num_shader_samplers(0), num_shader_images(0),
259 num_shader_uniform_components(0), num_shader_subroutines(0),
260 is_buffer_block(false), is_shader_storage(false), map(map),
261 hidden_map(hidden_map), current_var(NULL)
262 {
263 /* empty */
264 }
265
266 void start_shader()
267 {
268 this->num_shader_samplers = 0;
269 this->num_shader_images = 0;
270 this->num_shader_uniform_components = 0;
271 this->num_shader_subroutines = 0;
272 }
273
274 void process(ir_variable *var)
275 {
276 this->current_var = var;
277 this->is_buffer_block = var->is_in_buffer_block();
278 this->is_shader_storage = var->is_in_shader_storage_block();
279 if (var->is_interface_instance())
280 program_resource_visitor::process(var->get_interface_type(),
281 var->get_interface_type()->name);
282 else
283 program_resource_visitor::process(var);
284 }
285
286 /**
287 * Total number of active uniforms counted
288 */
289 unsigned num_active_uniforms;
290
291 unsigned num_hidden_uniforms;
292
293 /**
294 * Number of data values required to back the storage for the active uniforms
295 */
296 unsigned num_values;
297
298 /**
299 * Number of samplers used
300 */
301 unsigned num_shader_samplers;
302
303 /**
304 * Number of images used
305 */
306 unsigned num_shader_images;
307
308 /**
309 * Number of uniforms used in the current shader
310 */
311 unsigned num_shader_uniform_components;
312
313 /**
314 * Number of subroutine uniforms used
315 */
316 unsigned num_shader_subroutines;
317
318 bool is_buffer_block;
319 bool is_shader_storage;
320
321 struct string_to_uint_map *map;
322
323 private:
324 virtual void visit_field(const glsl_type *type, const char *name,
325 bool /* row_major */,
326 const glsl_type * /* record_type */,
327 const enum glsl_interface_packing,
328 bool /* last_field */)
329 {
330 assert(!type->without_array()->is_record());
331 assert(!type->without_array()->is_interface());
332 assert(!(type->is_array() && type->fields.array->is_array()));
333
334 /* Count the number of samplers regardless of whether the uniform is
335 * already in the hash table. The hash table prevents adding the same
336 * uniform for multiple shader targets, but in this case we want to
337 * count it for each shader target.
338 */
339 const unsigned values = type->component_slots();
340 if (type->contains_subroutine()) {
341 this->num_shader_subroutines += values;
342 } else if (type->contains_sampler() && !current_var->data.bindless) {
343 /* Samplers (bound or bindless) are counted as two components as
344 * specified by ARB_bindless_texture. */
345 this->num_shader_samplers += values / 2;
346 } else if (type->contains_image() && !current_var->data.bindless) {
347 /* Images (bound or bindless) are counted as two components as
348 * specified by ARB_bindless_texture. */
349 this->num_shader_images += values / 2;
350
351 /* As drivers are likely to represent image uniforms as
352 * scalar indices, count them against the limit of uniform
353 * components in the default block. The spec allows image
354 * uniforms to use up no more than one scalar slot.
355 */
356 if (!is_shader_storage)
357 this->num_shader_uniform_components += values;
358 } else {
359 /* Accumulate the total number of uniform slots used by this shader.
360 * Note that samplers do not count against this limit because they
361 * don't use any storage on current hardware.
362 */
363 if (!is_buffer_block)
364 this->num_shader_uniform_components += values;
365 }
366
367 /* If the uniform is already in the map, there's nothing more to do.
368 */
369 unsigned id;
370 if (this->map->get(id, name))
371 return;
372
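/* Hidden uniforms get indices from a separate counter; they are appended to
 * the end of the uniform storage array later by assign_hidden_uniform_slot_id().
 */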
373 if (this->current_var->data.how_declared == ir_var_hidden) {
374 this->hidden_map->put(this->num_hidden_uniforms, name);
375 this->num_hidden_uniforms++;
376 } else {
377 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
378 name);
379 }
380
381 /* Each leaf uniform occupies one entry in the list of active
382 * uniforms.
383 */
384 this->num_active_uniforms++;
385
386 if (!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
387 this->num_values += values;
388 }
389
390 struct string_to_uint_map *hidden_map;
391
392 /**
393 * Current variable being processed.
394 */
395 ir_variable *current_var;
396 };
397
398 } /* anonymous namespace */
399
400 /**
401 * Class to help parcel out pieces of backing storage to uniforms
402 *
403 * Each uniform processed has some range of the \c gl_constant_value
404 * structures associated with it. The association is done by finding
405 * the uniform in the \c string_to_uint_map and using the value from
406 * the map to connect that slot in the \c gl_uniform_storage table
407 * with the next available slot in the \c gl_constant_value array.
408 *
409 * \warning
410 * This class assumes that every uniform that will be processed is
411 * already in the \c string_to_uint_map. In addition, it assumes that
412 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
413 * enough."
414 */
415 class parcel_out_uniform_storage : public program_resource_visitor {
416 public:
417 parcel_out_uniform_storage(struct gl_shader_program *prog,
418 struct string_to_uint_map *map,
419 struct gl_uniform_storage *uniforms,
420 union gl_constant_value *values)
421 : prog(prog), map(map), uniforms(uniforms), values(values),
422 bindless_targets(NULL), bindless_access(NULL)
423 {
424 }
425
426 virtual ~parcel_out_uniform_storage()
427 {
428 free(this->bindless_targets);
429 free(this->bindless_access);
430 }
431
432 void start_shader(gl_shader_stage shader_type)
433 {
434 assert(shader_type < MESA_SHADER_STAGES);
435 this->shader_type = shader_type;
436
437 this->shader_samplers_used = 0;
438 this->shader_shadow_samplers = 0;
439 this->next_sampler = 0;
440 this->next_image = 0;
441 this->next_subroutine = 0;
442 this->record_array_count = 1;
443 memset(this->targets, 0, sizeof(this->targets));
444
445 this->num_bindless_samplers = 0;
446 this->next_bindless_sampler = 0;
447 free(this->bindless_targets);
448 this->bindless_targets = NULL;
449
450 this->num_bindless_images = 0;
451 this->next_bindless_image = 0;
452 free(this->bindless_access);
453 this->bindless_access = NULL;
454 }
455
456 void set_and_process(ir_variable *var)
457 {
458 current_var = var;
459 field_counter = 0;
460 this->record_next_sampler = new string_to_uint_map;
461 this->record_next_bindless_sampler = new string_to_uint_map;
462 this->record_next_image = new string_to_uint_map;
463 this->record_next_bindless_image = new string_to_uint_map;
464
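/* A buffer_block_index of -1 means the uniform lives in the default uniform
 * block rather than in a named UBO/SSBO; visit_field uses this to decide
 * whether to assign backing storage and byte offsets.
 */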
465 buffer_block_index = -1;
466 if (var->is_in_buffer_block()) {
467 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
468 prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
469 unsigned num_blks = var->is_in_shader_storage_block() ?
470 prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
471
472 if (var->is_interface_instance() && var->type->is_array()) {
473 unsigned l = strlen(var->get_interface_type()->name);
474
475 for (unsigned i = 0; i < num_blks; i++) {
476 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
477 == 0 && blks[i].Name[l] == '[') {
478 buffer_block_index = i;
479 break;
480 }
481 }
482 } else {
483 for (unsigned i = 0; i < num_blks; i++) {
484 if (strcmp(var->get_interface_type()->name, blks[i].Name) == 0) {
485 buffer_block_index = i;
486 break;
487 }
488 }
489 }
490 assert(buffer_block_index != -1);
491
492 /* Uniform blocks that were specified with an instance name must be
493 * handled a little bit differently. The name of the variable is the
494 * name used to reference the uniform block instead of being the name
495 * of a variable within the block. Therefore, searching for the name
496 * within the block will fail.
497 */
498 if (var->is_interface_instance()) {
499 ubo_byte_offset = 0;
500 process(var->get_interface_type(),
501 var->get_interface_type()->name);
502 } else {
503 const struct gl_uniform_block *const block =
504 &blks[buffer_block_index];
505
506 assert(var->data.location != -1);
507
508 const struct gl_uniform_buffer_variable *const ubo_var =
509 &block->Uniforms[var->data.location];
510
511 ubo_byte_offset = ubo_var->Offset;
512 process(var);
513 }
514 } else {
515 /* Store any explicit location and reset data location so we can
516 * reuse this variable for storing the uniform slot number.
517 */
518 this->explicit_location = current_var->data.location;
519 current_var->data.location = -1;
520
521 process(var);
522 }
523 delete this->record_next_sampler;
524 delete this->record_next_bindless_sampler;
525 delete this->record_next_image;
526 delete this->record_next_bindless_image;
527 }
528
529 int buffer_block_index;
530 int ubo_byte_offset;
531 gl_shader_stage shader_type;
532
533 private:
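/* Assigns opaque (sampler/image) indices for a uniform. Returns false when
 * the indices were already assigned on an earlier pass over a struct array,
 * true when new indices were allocated.
 */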
534 bool set_opaque_indices(const glsl_type *base_type,
535 struct gl_uniform_storage *uniform,
536 const char *name, unsigned &next_index,
537 struct string_to_uint_map *record_next_index)
538 {
539 assert(base_type->is_sampler() || base_type->is_image());
540
541 if (this->record_array_count > 1) {
542 unsigned inner_array_size = MAX2(1, uniform->array_elements);
543 char *name_copy = ralloc_strdup(NULL, name);
544
545 /* Remove all array subscripts from the sampler/image name */
546 char *str_start;
547 const char *str_end;
548 while ((str_start = strchr(name_copy, '[')) &&
549 (str_end = strchr(name_copy, ']'))) {
550 memmove(str_start, str_end + 1, 1 + strlen(str_end + 1));
551 }
552
553 unsigned index = 0;
554 if (record_next_index->get(index, name_copy)) {
555 /* In this case, we've already seen this uniform so we just use the
556 * next sampler/image index recorded the last time we visited.
557 */
558 uniform->opaque[shader_type].index = index;
559 index = inner_array_size + uniform->opaque[shader_type].index;
560 record_next_index->put(index, name_copy);
561
562 ralloc_free(name_copy);
563 /* Return as everything else has already been initialised in a
564 * previous pass.
565 */
566 return false;
567 } else {
568 /* We've never seen this uniform before so we need to allocate
569 * enough indices to store it.
570 *
571 * Nested struct arrays behave like arrays of arrays so we need to
572 * increase the index by the total number of elements of the
573 * sampler/image in case there is more than one sampler/image
574 * inside the structs. This allows the offset to be easily
575 * calculated for indirect indexing.
576 */
577 uniform->opaque[shader_type].index = next_index;
578 next_index += inner_array_size * this->record_array_count;
579
580 /* Store the next index for future passes over the struct array
581 */
582 index = uniform->opaque[shader_type].index + inner_array_size;
583 record_next_index->put(index, name_copy);
584 ralloc_free(name_copy);
585 }
586 } else {
587 /* Increment the sampler/image index by 1 for non-arrays and by the number
588 * of array elements for arrays.
589 */
590 uniform->opaque[shader_type].index = next_index;
591 next_index += MAX2(1, uniform->array_elements);
592 }
593 return true;
594 }
595
596 void handle_samplers(const glsl_type *base_type,
597 struct gl_uniform_storage *uniform, const char *name)
598 {
599 if (base_type->is_sampler()) {
600 uniform->opaque[shader_type].active = true;
601
602 const gl_texture_index target = base_type->sampler_index();
603 const unsigned shadow = base_type->sampler_shadow;
604
605 if (current_var->data.bindless) {
606 if (!set_opaque_indices(base_type, uniform, name,
607 this->next_bindless_sampler,
608 this->record_next_bindless_sampler))
609 return;
610
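/* Grow the per-stage table of texture targets so that every bindless
 * sampler index just allocated has its target recorded.
 */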
611 this->num_bindless_samplers = this->next_bindless_sampler;
612
613 this->bindless_targets = (gl_texture_index *)
614 realloc(this->bindless_targets,
615 this->num_bindless_samplers * sizeof(gl_texture_index));
616
617 for (unsigned i = uniform->opaque[shader_type].index;
618 i < this->num_bindless_samplers;
619 i++) {
620 this->bindless_targets[i] = target;
621 }
622 } else {
623 if (!set_opaque_indices(base_type, uniform, name,
624 this->next_sampler,
625 this->record_next_sampler))
626 return;
627
628 for (unsigned i = uniform->opaque[shader_type].index;
629 i < MIN2(this->next_sampler, MAX_SAMPLERS);
630 i++) {
631 this->targets[i] = target;
632 this->shader_samplers_used |= 1U << i;
633 this->shader_shadow_samplers |= shadow << i;
634 }
635 }
636 }
637 }
638
639 void handle_images(const glsl_type *base_type,
640 struct gl_uniform_storage *uniform, const char *name)
641 {
642 if (base_type->is_image()) {
643 uniform->opaque[shader_type].active = true;
644
645 /* Set image access qualifiers */
646 const GLenum access =
647 (current_var->data.memory_read_only ? GL_READ_ONLY :
648 current_var->data.memory_write_only ? GL_WRITE_ONLY :
649 GL_READ_WRITE);
650
651 if (current_var->data.bindless) {
652 if (!set_opaque_indices(base_type, uniform, name,
653 this->next_bindless_image,
654 this->record_next_bindless_image))
655 return;
656
657 this->num_bindless_images = this->next_bindless_image;
658
659 this->bindless_access = (GLenum *)
660 realloc(this->bindless_access,
661 this->num_bindless_images * sizeof(GLenum));
662
663 for (unsigned i = uniform->opaque[shader_type].index;
664 i < this->num_bindless_images;
665 i++) {
666 this->bindless_access[i] = access;
667 }
668 } else {
669 if (!set_opaque_indices(base_type, uniform, name,
670 this->next_image,
671 this->record_next_image))
672 return;
673
674 for (unsigned i = uniform->opaque[shader_type].index;
675 i < MIN2(this->next_image, MAX_IMAGE_UNIFORMS);
676 i++) {
677 prog->_LinkedShaders[shader_type]->Program->sh.ImageAccess[i] = access;
678 }
679 }
680 }
681 }
682
683 void handle_subroutines(const glsl_type *base_type,
684 struct gl_uniform_storage *uniform)
685 {
686 if (base_type->is_subroutine()) {
687 uniform->opaque[shader_type].index = this->next_subroutine;
688 uniform->opaque[shader_type].active = true;
689
690 prog->_LinkedShaders[shader_type]->Program->sh.NumSubroutineUniforms++;
691
692 /* Increment the subroutine index by 1 for non-arrays and by the
693 * number of array elements for arrays.
694 */
695 this->next_subroutine += MAX2(1, uniform->array_elements);
696
697 }
698 }
699
700 virtual void set_buffer_offset(unsigned offset)
701 {
702 this->ubo_byte_offset = offset;
703 }
704
705 virtual void set_record_array_count(unsigned record_array_count)
706 {
707 this->record_array_count = record_array_count;
708 }
709
710 virtual void enter_record(const glsl_type *type, const char *,
711 bool row_major,
712 const enum glsl_interface_packing packing)
713 {
714 assert(type->is_record());
715 if (this->buffer_block_index == -1)
716 return;
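/* Align the running offset to the structure's base alignment before laying
 * out its members, as required by std140/std430.
 */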
717 if (packing == GLSL_INTERFACE_PACKING_STD430)
718 this->ubo_byte_offset = glsl_align(
719 this->ubo_byte_offset, type->std430_base_alignment(row_major));
720 else
721 this->ubo_byte_offset = glsl_align(
722 this->ubo_byte_offset, type->std140_base_alignment(row_major));
723 }
724
725 virtual void leave_record(const glsl_type *type, const char *,
726 bool row_major,
727 const enum glsl_interface_packing packing)
728 {
729 assert(type->is_record());
730 if (this->buffer_block_index == -1)
731 return;
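/* std140/std430 also require the offset following a structure to be rounded
 * up to the structure's base alignment, so realign when leaving the record.
 */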
732 if (packing == GLSL_INTERFACE_PACKING_STD430)
733 this->ubo_byte_offset = glsl_align(
734 this->ubo_byte_offset, type->std430_base_alignment(row_major));
735 else
736 this->ubo_byte_offset = glsl_align(
737 this->ubo_byte_offset, type->std140_base_alignment(row_major));
738 }
739
740 virtual void visit_field(const glsl_type *type, const char *name,
741 bool row_major, const glsl_type * /* record_type */,
742 const enum glsl_interface_packing packing,
743 bool /* last_field */)
744 {
745 assert(!type->without_array()->is_record());
746 assert(!type->without_array()->is_interface());
747 assert(!(type->is_array() && type->fields.array->is_array()));
748
749 unsigned id;
750 bool found = this->map->get(id, name);
751 assert(found);
752
753 if (!found)
754 return;
755
756 const glsl_type *base_type;
757 if (type->is_array()) {
758 this->uniforms[id].array_elements = type->length;
759 base_type = type->fields.array;
760 } else {
761 this->uniforms[id].array_elements = 0;
762 base_type = type;
763 }
764
765 /* Initialise opaque data */
766 this->uniforms[id].opaque[shader_type].index = ~0;
767 this->uniforms[id].opaque[shader_type].active = false;
768
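/* Mark this uniform as referenced by the current shader stage. */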
769 this->uniforms[id].active_shader_mask |= 1 << shader_type;
770
771 /* This assigns uniform indices to sampler and image uniforms. */
772 handle_samplers(base_type, &this->uniforms[id], name);
773 handle_images(base_type, &this->uniforms[id], name);
774 handle_subroutines(base_type, &this->uniforms[id]);
775
776 /* For arrays of arrays or struct arrays, the base location may have
777 * already been set, so don't set it again.
778 */
779 if (buffer_block_index == -1 && current_var->data.location == -1) {
780 current_var->data.location = id;
781 }
782
783 /* If there is already storage associated with this uniform or if the
784 * uniform is set as builtin, it means that it was set while processing
785 * an earlier shader stage. For example, we may be processing the
786 * uniform in the fragment shader, but the uniform was already processed
787 * in the vertex shader.
788 */
789 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
790 return;
791 }
792
793 /* Assign explicit locations. */
794 if (current_var->data.explicit_location) {
795 /* Set sequential locations for struct fields. */
796 if (current_var->type->without_array()->is_record() ||
797 current_var->type->is_array_of_arrays()) {
798 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
799 this->uniforms[id].remap_location =
800 this->explicit_location + field_counter;
801 field_counter += entries;
802 } else {
803 this->uniforms[id].remap_location = this->explicit_location;
804 }
805 } else {
806 /* Initialize to indicate that no location is set */
807 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
808 }
809
810 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
811 this->uniforms[id].type = base_type;
812 this->uniforms[id].num_driver_storage = 0;
813 this->uniforms[id].driver_storage = NULL;
814 this->uniforms[id].atomic_buffer_index = -1;
815 this->uniforms[id].hidden =
816 current_var->data.how_declared == ir_var_hidden;
817 this->uniforms[id].builtin = is_gl_identifier(name);
818
819 this->uniforms[id].is_shader_storage =
820 current_var->is_in_shader_storage_block();
821 this->uniforms[id].is_bindless = current_var->data.bindless;
822
823 /* Do not assign storage if the uniform is a builtin or buffer object */
824 if (!this->uniforms[id].builtin &&
825 !this->uniforms[id].is_shader_storage &&
826 this->buffer_block_index == -1)
827 this->uniforms[id].storage = this->values;
828
829 if (this->buffer_block_index != -1) {
830 this->uniforms[id].block_index = this->buffer_block_index;
831
832 unsigned alignment = type->std140_base_alignment(row_major);
833 if (packing == GLSL_INTERFACE_PACKING_STD430)
834 alignment = type->std430_base_alignment(row_major);
835 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
836 this->uniforms[id].offset = this->ubo_byte_offset;
837 if (packing == GLSL_INTERFACE_PACKING_STD430)
838 this->ubo_byte_offset += type->std430_size(row_major);
839 else
840 this->ubo_byte_offset += type->std140_size(row_major);
841
842 if (type->is_array()) {
843 if (packing == GLSL_INTERFACE_PACKING_STD430)
844 this->uniforms[id].array_stride =
845 type->without_array()->std430_array_stride(row_major);
846 else
847 this->uniforms[id].array_stride =
848 glsl_align(type->without_array()->std140_size(row_major),
849 16);
850 } else {
851 this->uniforms[id].array_stride = 0;
852 }
853
854 if (type->without_array()->is_matrix()) {
855 const glsl_type *matrix = type->without_array();
856 const unsigned N = matrix->is_double() ? 8 : 4;
857 const unsigned items =
858 row_major ? matrix->matrix_columns : matrix->vector_elements;
859
860 assert(items <= 4);
861 if (packing == GLSL_INTERFACE_PACKING_STD430)
862 this->uniforms[id].matrix_stride = items < 3 ? items * N :
863 glsl_align(items * N, 16);
864 else
865 this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
866 this->uniforms[id].row_major = row_major;
867 } else {
868 this->uniforms[id].matrix_stride = 0;
869 this->uniforms[id].row_major = false;
870 }
871 } else {
872 this->uniforms[id].block_index = -1;
873 this->uniforms[id].offset = -1;
874 this->uniforms[id].array_stride = -1;
875 this->uniforms[id].matrix_stride = -1;
876 this->uniforms[id].row_major = false;
877 }
878
879 if (!this->uniforms[id].builtin &&
880 !this->uniforms[id].is_shader_storage &&
881 this->buffer_block_index == -1)
882 this->values += type->component_slots();
883 }
884
885 /**
886 * Current program being processed.
887 */
888 struct gl_shader_program *prog;
889
890 struct string_to_uint_map *map;
891
892 struct gl_uniform_storage *uniforms;
893 unsigned next_sampler;
894 unsigned next_bindless_sampler;
895 unsigned next_image;
896 unsigned next_bindless_image;
897 unsigned next_subroutine;
898
899 /**
900 * Field counter is used to ensure that uniform structures
901 * with explicit locations get sequential locations.
902 */
903 unsigned field_counter;
904
905 /**
906 * Current variable being processed.
907 */
908 ir_variable *current_var;
909
910 /* Used to store the explicit location from current_var so that we can
911 * reuse the location field for storing the uniform slot id.
912 */
913 int explicit_location;
914
915 /* Stores the total number of struct array elements, including nested structs */
916 unsigned record_array_count;
917
918 /* Map for temporarily storing next sampler index when handling samplers in
919 * struct arrays.
920 */
921 struct string_to_uint_map *record_next_sampler;
922
923 /* Map for temporarily storing the next image index when handling images in
924 * struct arrays.
925 */
926 struct string_to_uint_map *record_next_image;
927
928 /* Map for temporarily storing next bindless sampler index when handling
929 * bindless samplers in struct arrays.
930 */
931 struct string_to_uint_map *record_next_bindless_sampler;
932
933 /* Map for temporarily storing next bindless image index when handling
934 * bindless images in struct arrays.
935 */
936 struct string_to_uint_map *record_next_bindless_image;
937
938 public:
939 union gl_constant_value *values;
940
941 gl_texture_index targets[MAX_SAMPLERS];
942
943 /**
944 * Mask of samplers used by the current shader stage.
945 */
946 unsigned shader_samplers_used;
947
948 /**
949 * Mask of samplers used by the current shader stage for shadows.
950 */
951 unsigned shader_shadow_samplers;
952
953 /**
954 * Number of bindless samplers used by the current shader stage.
955 */
956 unsigned num_bindless_samplers;
957
958 /**
959 * Texture targets for bindless samplers used by the current stage.
960 */
961 gl_texture_index *bindless_targets;
962
963 /**
964 * Number of bindless images used by the current shader stage.
965 */
966 unsigned num_bindless_images;
967
968 /**
969 * Access types for bindless images used by the current stage.
970 */
971 GLenum *bindless_access;
972
973 };
974
975 static bool
976 variable_is_referenced(ir_array_refcount_visitor &v, ir_variable *var)
977 {
978 ir_array_refcount_entry *const entry = v.get_variable_entry(var);
979
980 return entry->is_referenced;
981
982 }
983
984 /**
985 * Walks the IR and update the references to uniform blocks in the
986 * ir_variables to point at linked shader's list (previously, they
987 * would point at the uniform block list in one of the pre-linked
988 * shaders).
989 */
990 static void
991 link_update_uniform_buffer_variables(struct gl_linked_shader *shader,
992 unsigned stage)
993 {
994 ir_array_refcount_visitor v;
995
996 v.run(shader->ir);
997
998 foreach_in_list(ir_instruction, node, shader->ir) {
999 ir_variable *const var = node->as_variable();
1000
1001 if (var == NULL || !var->is_in_buffer_block())
1002 continue;
1003
1004 assert(var->data.mode == ir_var_uniform ||
1005 var->data.mode == ir_var_shader_storage);
1006
1007 unsigned num_blocks = var->data.mode == ir_var_uniform ?
1008 shader->Program->info.num_ubos : shader->Program->info.num_ssbos;
1009 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
1010 shader->Program->sh.UniformBlocks :
1011 shader->Program->sh.ShaderStorageBlocks;
1012
1013 if (var->is_interface_instance()) {
1014 const ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1015
1016 if (entry->is_referenced) {
1017 /* Since this is an interface instance, the instance type will be
1018 * the same as the array-stripped variable type. If the variable type
1019 * is an array, then the block names will be suffixed with [0]
1020 * through [n-1]. Unlike for non-interface instances, there will
1021 * not be structure types here, so the only name sentinel that we
1022 * have to worry about is [.
1023 */
1024 assert(var->type->without_array() == var->get_interface_type());
1025 const char sentinel = var->type->is_array() ? '[' : '\0';
1026
1027 const ptrdiff_t len = strlen(var->get_interface_type()->name);
1028 for (unsigned i = 0; i < num_blocks; i++) {
1029 const char *const begin = blks[i]->Name;
1030 const char *const end = strchr(begin, sentinel);
1031
1032 if (end == NULL)
1033 continue;
1034
1035 if (len != (end - begin))
1036 continue;
1037
1038 /* Even when a match is found, do not "break" here. This could
1039 * be an array of instances, and all elements of the array need
1040 * to be marked as referenced.
1041 */
1042 if (strncmp(begin, var->get_interface_type()->name, len) == 0 &&
1043 (!var->type->is_array() ||
1044 entry->is_linearized_index_referenced(blks[i]->linearized_array_index))) {
1045 blks[i]->stageref |= 1U << stage;
1046 }
1047 }
1048 }
1049
1050 var->data.location = 0;
1051 continue;
1052 }
1053
1054 bool found = false;
1055 char sentinel = '\0';
1056
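/* The sentinel is the character that terminates the variable's own name
 * within a block member name: '.' for struct members, '[' for arrays of
 * arrays or arrays of structs. A '\0' sentinel means an exact match is
 * required.
 */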
1057 if (var->type->is_record()) {
1058 sentinel = '.';
1059 } else if (var->type->is_array() && (var->type->fields.array->is_array()
1060 || var->type->without_array()->is_record())) {
1061 sentinel = '[';
1062 }
1063
1064 const unsigned l = strlen(var->name);
1065 for (unsigned i = 0; i < num_blocks; i++) {
1066 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
1067 if (sentinel) {
1068 const char *begin = blks[i]->Uniforms[j].Name;
1069 const char *end = strchr(begin, sentinel);
1070
1071 if (end == NULL)
1072 continue;
1073
1074 if ((ptrdiff_t) l != (end - begin))
1075 continue;
1076
1077 found = strncmp(var->name, begin, l) == 0;
1078 } else {
1079 found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
1080 }
1081
1082 if (found) {
1083 var->data.location = j;
1084
1085 if (variable_is_referenced(v, var))
1086 blks[i]->stageref |= 1U << stage;
1087
1088 break;
1089 }
1090 }
1091
1092 if (found)
1093 break;
1094 }
1095 assert(found);
1096 }
1097 }
1098
1099 /**
1100 * Combine the hidden uniform hash map with the uniform hash map so that the
1101 * hidden uniforms will be given indices at the end of the uniform storage
1102 * array.
1103 */
1104 static void
1105 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
1106 void *closure)
1107 {
1108 count_uniform_size *uniform_size = (count_uniform_size *) closure;
1109 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
1110 uniform_size->num_hidden_uniforms;
1111
1112 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
1113 }
1114
1115 /**
1116 * Search through the list of empty blocks to find one that fits the current
1117 * uniform.
1118 */
1119 static int
1120 find_empty_block(struct gl_shader_program *prog,
1121 struct gl_uniform_storage *uniform)
1122 {
1123 const unsigned entries = MAX2(1, uniform->array_elements);
1124
1125 foreach_list_typed(struct empty_uniform_block, block, link,
1126 &prog->EmptyUniformLocations) {
1127 /* Found a block with enough slots to fit the uniform */
1128 if (block->slots == entries) {
1129 unsigned start = block->start;
1130 exec_node_remove(&block->link);
1131 ralloc_free(block);
1132
1133 return start;
1134 /* Found a block with more slots than needed. It can still be used. */
1135 } else if (block->slots > entries) {
1136 unsigned start = block->start;
1137 block->start += entries;
1138 block->slots -= entries;
1139
1140 return start;
1141 }
1142 }
1143
1144 return -1;
1145 }
1146
1147 static void
1148 link_setup_uniform_remap_tables(struct gl_context *ctx,
1149 struct gl_shader_program *prog)
1150 {
1151 unsigned total_entries = prog->NumExplicitUniformLocations;
1152 unsigned empty_locs = prog->NumUniformRemapTable - total_entries;
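/* The remap table currently covers the explicitly assigned locations; the
 * unused slots between them (empty_locs) can be reused for the remaining
 * uniforms below.
 */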
1153
1154 /* Reserve all the explicit locations of the active uniforms. */
1155 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1156 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1157 prog->data->UniformStorage[i].is_shader_storage)
1158 continue;
1159
1160 if (prog->data->UniformStorage[i].remap_location !=
1161 UNMAPPED_UNIFORM_LOC) {
1162 /* How many new entries for this uniform? */
1163 const unsigned entries =
1164 MAX2(1, prog->data->UniformStorage[i].array_elements);
1165
1166 /* Set the remap table entries to point to the correct gl_uniform_storage. */
1167 for (unsigned j = 0; j < entries; j++) {
1168 unsigned element_loc =
1169 prog->data->UniformStorage[i].remap_location + j;
1170 assert(prog->UniformRemapTable[element_loc] ==
1171 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1172 prog->UniformRemapTable[element_loc] =
1173 &prog->data->UniformStorage[i];
1174 }
1175 }
1176 }
1177
1178 /* Reserve locations for the rest of the uniforms. */
1179 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1180
1181 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1182 prog->data->UniformStorage[i].is_shader_storage)
1183 continue;
1184
1185 /* Built-in uniforms should not get any location. */
1186 if (prog->data->UniformStorage[i].builtin)
1187 continue;
1188
1189 /* Explicit ones have been set already. */
1190 if (prog->data->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1191 continue;
1192
1193 /* How many new entries for this uniform? */
1194 const unsigned entries =
1195 MAX2(1, prog->data->UniformStorage[i].array_elements);
1196
1197 /* Look for an empty block in the UniformRemapTable where this uniform fits. */
1198 int chosen_location = -1;
1199
1200 if (empty_locs)
1201 chosen_location = find_empty_block(prog, &prog->data->UniformStorage[i]);
1202
1203 /* Add this uniform's entries to the total count. */
1204 total_entries += entries;
1205
1206 if (chosen_location != -1) {
1207 empty_locs -= entries;
1208 } else {
1209 chosen_location = prog->NumUniformRemapTable;
1210
1211 /* resize remap table to fit new entries */
1212 prog->UniformRemapTable =
1213 reralloc(prog,
1214 prog->UniformRemapTable,
1215 gl_uniform_storage *,
1216 prog->NumUniformRemapTable + entries);
1217 prog->NumUniformRemapTable += entries;
1218 }
1219
1220 /* set pointers for this uniform */
1221 for (unsigned j = 0; j < entries; j++)
1222 prog->UniformRemapTable[chosen_location + j] =
1223 &prog->data->UniformStorage[i];
1224
1225 /* set the base location in remap table for the uniform */
1226 prog->data->UniformStorage[i].remap_location = chosen_location;
1227 }
1228
1229 /* Verify that the total number of entries for explicit and implicit locations
1230 * is less than MAX_UNIFORM_LOCATIONS.
1231 */
1232
1233 if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
1234 linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
1235 "(%u > %u)", total_entries,
1236 ctx->Const.MaxUserAssignableUniformLocations);
1237 }
1238
1239 /* Reserve all the explicit locations of the active subroutine uniforms. */
1240 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1241 if (!prog->data->UniformStorage[i].type->is_subroutine())
1242 continue;
1243
1244 if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
1245 continue;
1246
1247 /* How many new entries for this uniform? */
1248 const unsigned entries =
1249 MAX2(1, prog->data->UniformStorage[i].array_elements);
1250
1251 unsigned mask = prog->data->linked_stages;
1252 while (mask) {
1253 const int j = u_bit_scan(&mask);
1254 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1255
1256 if (!prog->data->UniformStorage[i].opaque[j].active)
1257 continue;
1258
1259 /* Set the remap table entries to point to the correct gl_uniform_storage. */
1260 for (unsigned k = 0; k < entries; k++) {
1261 unsigned element_loc =
1262 prog->data->UniformStorage[i].remap_location + k;
1263 assert(p->sh.SubroutineUniformRemapTable[element_loc] ==
1264 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1265 p->sh.SubroutineUniformRemapTable[element_loc] =
1266 &prog->data->UniformStorage[i];
1267 }
1268 }
1269 }
1270
1271 /* reserve subroutine locations */
1272 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1273 if (!prog->data->UniformStorage[i].type->is_subroutine())
1274 continue;
1275
1276 if (prog->data->UniformStorage[i].remap_location !=
1277 UNMAPPED_UNIFORM_LOC)
1278 continue;
1279
1280 const unsigned entries =
1281 MAX2(1, prog->data->UniformStorage[i].array_elements);
1282
1283 unsigned mask = prog->data->linked_stages;
1284 while (mask) {
1285 const int j = u_bit_scan(&mask);
1286 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1287
1288 if (!prog->data->UniformStorage[i].opaque[j].active)
1289 continue;
1290
1291 p->sh.SubroutineUniformRemapTable =
1292 reralloc(p,
1293 p->sh.SubroutineUniformRemapTable,
1294 gl_uniform_storage *,
1295 p->sh.NumSubroutineUniformRemapTable + entries);
1296
1297 for (unsigned k = 0; k < entries; k++) {
1298 p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
1299 &prog->data->UniformStorage[i];
1300 }
1301 prog->data->UniformStorage[i].remap_location =
1302 p->sh.NumSubroutineUniformRemapTable;
1303 p->sh.NumSubroutineUniformRemapTable += entries;
1304 }
1305 }
1306 }
1307
1308 static void
1309 link_assign_uniform_storage(struct gl_context *ctx,
1310 struct gl_shader_program *prog,
1311 const unsigned num_data_slots)
1312 {
1313 /* On the outside chance that there were no uniforms, bail out.
1314 */
1315 if (prog->data->NumUniformStorage == 0)
1316 return;
1317
1318 unsigned int boolean_true = ctx->Const.UniformBooleanTrue;
1319
1320 union gl_constant_value *data;
1321 if (prog->data->UniformStorage == NULL) {
1322 prog->data->UniformStorage = rzalloc_array(prog,
1323 struct gl_uniform_storage,
1324 prog->data->NumUniformStorage);
1325 data = rzalloc_array(prog->data->UniformStorage,
1326 union gl_constant_value, num_data_slots);
1327 } else {
1328 data = prog->data->UniformDataSlots;
1329 }
1330
1331 #ifndef NDEBUG
1332 union gl_constant_value *data_end = &data[num_data_slots];
1333 #endif
1334
1335 parcel_out_uniform_storage parcel(prog, prog->UniformHash,
1336 prog->data->UniformStorage, data);
1337
1338 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1339 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
1340
1341 if (!shader)
1342 continue;
1343
1344 parcel.start_shader((gl_shader_stage)i);
1345
1346 foreach_in_list(ir_instruction, node, shader->ir) {
1347 ir_variable *const var = node->as_variable();
1348
1349 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1350 var->data.mode != ir_var_shader_storage))
1351 continue;
1352
1353 parcel.set_and_process(var);
1354 }
1355
1356 shader->Program->SamplersUsed = parcel.shader_samplers_used;
1357 shader->shadow_samplers = parcel.shader_shadow_samplers;
1358
1359 if (parcel.num_bindless_samplers > 0) {
1360 shader->Program->sh.NumBindlessSamplers = parcel.num_bindless_samplers;
1361 shader->Program->sh.BindlessSamplers =
1362 rzalloc_array(shader->Program, gl_bindless_sampler,
1363 parcel.num_bindless_samplers);
1364 for (unsigned j = 0; j < parcel.num_bindless_samplers; j++) {
1365 shader->Program->sh.BindlessSamplers[j].target =
1366 parcel.bindless_targets[j];
1367 }
1368 }
1369
1370 if (parcel.num_bindless_images > 0) {
1371 shader->Program->sh.NumBindlessImages = parcel.num_bindless_images;
1372 shader->Program->sh.BindlessImages =
1373 rzalloc_array(shader->Program, gl_bindless_image,
1374 parcel.num_bindless_images);
1375 for (unsigned j = 0; j < parcel.num_bindless_images; j++) {
1376 shader->Program->sh.BindlessImages[j].access =
1377 parcel.bindless_access[j];
1378 }
1379 }
1380
1381 STATIC_ASSERT(sizeof(shader->Program->sh.SamplerTargets) ==
1382 sizeof(parcel.targets));
1383 memcpy(shader->Program->sh.SamplerTargets,
1384 parcel.targets,
1385 sizeof(shader->Program->sh.SamplerTargets));
1386 }
1387
1388 /* If this is a fallback compile for a cache miss, we already have the
1389 * correct uniform mappings and don't want to reinitialise uniforms, so
1390 * just return now.
1391 */
1392 if (prog->data->cache_fallback)
1393 return;
1394
1395 #ifndef NDEBUG
1396 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1397 assert(prog->data->UniformStorage[i].storage != NULL ||
1398 prog->data->UniformStorage[i].builtin ||
1399 prog->data->UniformStorage[i].is_shader_storage ||
1400 prog->data->UniformStorage[i].block_index != -1);
1401 }
1402
1403 assert(parcel.values == data_end);
1404 #endif
1405
1406 link_setup_uniform_remap_tables(ctx, prog);
1407
1408 /* Set shader cache fields */
1409 prog->data->NumUniformDataSlots = num_data_slots;
1410 prog->data->UniformDataSlots = data;
1411
1412 link_set_uniform_initializers(prog, boolean_true);
1413 }
1414
1415 void
1416 link_assign_uniform_locations(struct gl_shader_program *prog,
1417 struct gl_context *ctx)
1418 {
1419 if (!prog->data->cache_fallback) {
1420 ralloc_free(prog->data->UniformStorage);
1421 prog->data->UniformStorage = NULL;
1422 prog->data->NumUniformStorage = 0;
1423 }
1424
1425 if (prog->UniformHash != NULL) {
1426 prog->UniformHash->clear();
1427 } else {
1428 prog->UniformHash = new string_to_uint_map;
1429 }
1430
1431 /* First pass: Count the uniform resources used by the user-defined
1432 * uniforms. While this happens, each active uniform will have an index
1433 * assigned to it.
1434 *
1435 * Note: this is *NOT* the index that is returned to the application by
1436 * glGetUniformLocation.
1437 */
1438 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1439 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms);
1440 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1441 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1442
1443 if (sh == NULL)
1444 continue;
1445
1446 link_update_uniform_buffer_variables(sh, i);
1447
1448 /* Reset various per-shader target counts.
1449 */
1450 uniform_size.start_shader();
1451
1452 foreach_in_list(ir_instruction, node, sh->ir) {
1453 ir_variable *const var = node->as_variable();
1454
1455 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1456 var->data.mode != ir_var_shader_storage))
1457 continue;
1458
1459 uniform_size.process(var);
1460 }
1461
1462 sh->Program->info.num_textures = uniform_size.num_shader_samplers;
1463 sh->Program->info.num_images = uniform_size.num_shader_images;
1464 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1465 sh->num_combined_uniform_components = sh->num_uniform_components;
1466
1467 for (unsigned i = 0; i < sh->Program->info.num_ubos; i++) {
1468 sh->num_combined_uniform_components +=
1469 sh->Program->sh.UniformBlocks[i]->UniformBufferSize / 4;
1470 }
1471 }
1472
1473 prog->data->NumUniformStorage = uniform_size.num_active_uniforms;
1474 prog->data->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
1475
1476 /* assign hidden uniforms a slot id */
1477 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1478 delete hiddenUniforms;
1479
1480 link_assign_uniform_storage(ctx, prog, uniform_size.num_values);
1481 }