glsl: drop cache_fallback
[mesa.git] src/compiler/glsl/link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program.h"
30 #include "string_to_uint_map.h"
31 #include "ir_array_refcount.h"
32
33 /**
34 * \file link_uniforms.cpp
35 * Assign locations for GLSL uniforms.
36 *
37 * \author Ian Romanick <ian.d.romanick@intel.com>
38 */
39
40 /**
41 * Used by linker to indicate uniforms that have no location set.
42 */
43 #define UNMAPPED_UNIFORM_LOC ~0u
44
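/* Process a struct or interface type that is referenced by type and name
 * only (no ir_variable), e.g. an interface block instance.  Determines the
 * packing for the type and walks its members via recursion(), invoking
 * visit_field() for every leaf.
 */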
45 void
46 program_resource_visitor::process(const glsl_type *type, const char *name,
47 bool use_std430_as_default)
48 {
49 assert(type->without_array()->is_record()
50 || type->without_array()->is_interface());
51
52 unsigned record_array_count = 1;
53 char *name_copy = ralloc_strdup(NULL, name);
54
55 enum glsl_interface_packing packing =
56 type->get_internal_ifc_packing(use_std430_as_default);
57
58 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
59 record_array_count, NULL);
60 ralloc_free(name_copy);
61 }
62
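/* Process a single ir_variable.  The row-major layout and interface packing
 * are taken from the variable itself; struct, interface and array-of-array
 * types are walked via recursion(), anything else is visited directly as a
 * single leaf field.
 */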
63 void
64 program_resource_visitor::process(ir_variable *var, bool use_std430_as_default)
65 {
66 unsigned record_array_count = 1;
67 const bool row_major =
68 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
69
70 enum glsl_interface_packing packing = var->get_interface_type() ?
71 var->get_interface_type()->
72 get_internal_ifc_packing(use_std430_as_default) :
73 var->type->get_internal_ifc_packing(use_std430_as_default);
74
75 const glsl_type *t =
76 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
77 const glsl_type *t_without_array = t->without_array();
78
79 /* false is always passed for the row_major parameter to the other
80 * processing functions because no information is available to do
81 * otherwise. See the warning in linker.h.
82 */
83 if (t_without_array->is_record() ||
84 (t->is_array() && t->fields.array->is_array())) {
85 char *name = ralloc_strdup(NULL, var->name);
86 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
87 false, record_array_count, NULL);
88 ralloc_free(name);
89 } else if (t_without_array->is_interface()) {
90 char *name = ralloc_strdup(NULL, t_without_array->name);
91 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
92 &t_without_array->
93 fields.structure[t_without_array->field_index(var->name)] : NULL;
94
95 recursion(t, &name, strlen(name), row_major, NULL, packing,
96 false, record_array_count, ifc_member);
97 ralloc_free(name);
98 } else {
99 this->set_record_array_count(record_array_count);
100 this->visit_field(t, var->name, row_major, NULL, packing, false);
101 }
102 }
103
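/* Recursive worker shared by both process() entry points.  Appends ".field"
 * and "[i]" subscripts to *name while descending, propagates the row-major
 * layout, packing and record_array_count, and calls visit_field() for every
 * leaf encountered.
 */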
104 void
105 program_resource_visitor::recursion(const glsl_type *t, char **name,
106 size_t name_length, bool row_major,
107 const glsl_type *record_type,
108 const enum glsl_interface_packing packing,
109 bool last_field,
110 unsigned record_array_count,
111 const glsl_struct_field *named_ifc_member)
112 {
113 /* Records need to have each field processed individually.
114 *
115 * Arrays of records need to have each array element processed
116 * individually, then each field of the resulting array elements processed
117 * individually.
118 */
119 if (t->is_interface() && named_ifc_member) {
120 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
121 named_ifc_member->name);
122 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
123 packing, false, record_array_count, NULL);
124 } else if (t->is_record() || t->is_interface()) {
125 if (record_type == NULL && t->is_record())
126 record_type = t;
127
128 if (t->is_record())
129 this->enter_record(t, *name, row_major, packing);
130
131 for (unsigned i = 0; i < t->length; i++) {
132 const char *field = t->fields.structure[i].name;
133 size_t new_length = name_length;
134
135 if (t->fields.structure[i].type->is_record())
136 this->visit_field(&t->fields.structure[i]);
137
138 if (t->is_interface() && t->fields.structure[i].offset != -1)
139 this->set_buffer_offset(t->fields.structure[i].offset);
140
141 /* Append '.field' to the current variable name. */
142 if (name_length == 0) {
143 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
144 } else {
145 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
146 }
147
148 /* The layout of structures at the top level of the block is set
149 * during parsing. For matrices contained in multiple levels of
150 * structures in the block, the inner structures have no layout.
151 * These cases must potentially inherit the layout from the outer
152 * levels.
153 */
154 bool field_row_major = row_major;
155 const enum glsl_matrix_layout matrix_layout =
156 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
157 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
158 field_row_major = true;
159 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
160 field_row_major = false;
161 }
162
163 recursion(t->fields.structure[i].type, name, new_length,
164 field_row_major,
165 record_type,
166 packing,
167 (i + 1) == t->length, record_array_count, NULL);
168
169          /* Only the first leaf field of the record is visited with the
170 * record type pointer.
171 */
172 record_type = NULL;
173 }
174
175 if (t->is_record()) {
176 (*name)[name_length] = '\0';
177 this->leave_record(t, *name, row_major, packing);
178 }
179 } else if (t->without_array()->is_record() ||
180 t->without_array()->is_interface() ||
181 (t->is_array() && t->fields.array->is_array())) {
182 if (record_type == NULL && t->fields.array->is_record())
183 record_type = t->fields.array;
184
185 unsigned length = t->length;
186
187 /* Shader storage block unsized arrays: add subscript [0] to variable
188 * names.
189 */
190 if (t->is_unsized_array())
191 length = 1;
192
193 record_array_count *= length;
194
195 for (unsigned i = 0; i < length; i++) {
196 size_t new_length = name_length;
197
198 /* Append the subscript to the current variable name */
199 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
200
201 recursion(t->fields.array, name, new_length, row_major,
202 record_type,
203 packing,
204 (i + 1) == t->length, record_array_count,
205 named_ifc_member);
206
207          /* Only the first leaf field of the record is visited with the
208 * record type pointer.
209 */
210 record_type = NULL;
211 }
212 } else {
213 this->set_record_array_count(record_array_count);
214 this->visit_field(t, *name, row_major, record_type, packing, last_field);
215 }
216 }
217
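/* Default no-op implementations of the optional visitor hooks below.
 * Concrete visitors override only the hooks they care about.
 */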
218 void
219 program_resource_visitor::visit_field(const glsl_struct_field *)
220 {
221 }
222
223 void
224 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
225 const enum glsl_interface_packing)
226 {
227 }
228
229 void
230 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
231 const enum glsl_interface_packing)
232 {
233 }
234
235 void
236 program_resource_visitor::set_buffer_offset(unsigned)
237 {
238 }
239
240 void
241 program_resource_visitor::set_record_array_count(unsigned)
242 {
243 }
244
245 namespace {
246
247 /**
248 * Class to help calculate the storage requirements for a set of uniforms
249 *
250 * As uniforms are added to the active set the number of active uniforms and
251 * the storage requirements for those uniforms are accumulated. The active
252 * uniforms are added to the hash table supplied to the constructor.
253 *
254 * If the same uniform is added multiple times (i.e., once for each shader
255  * target), it will only be counted once.
256 */
257 class count_uniform_size : public program_resource_visitor {
258 public:
259 count_uniform_size(struct string_to_uint_map *map,
260 struct string_to_uint_map *hidden_map,
261 bool use_std430_as_default)
262 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
263 num_shader_samplers(0), num_shader_images(0),
264 num_shader_uniform_components(0), num_shader_subroutines(0),
265 is_buffer_block(false), is_shader_storage(false), map(map),
266 hidden_map(hidden_map), current_var(NULL),
267 use_std430_as_default(use_std430_as_default)
268 {
269 /* empty */
270 }
271
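   /* Reset the per-stage counters before the uniforms of the next shader
    * stage are counted.
    */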
272 void start_shader()
273 {
274 this->num_shader_samplers = 0;
275 this->num_shader_images = 0;
276 this->num_shader_uniform_components = 0;
277 this->num_shader_subroutines = 0;
278 }
279
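   /* Count a single uniform variable.  Interface instances are processed by
    * their interface type so that block members are named relative to the
    * block.
    */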
280 void process(ir_variable *var)
281 {
282 this->current_var = var;
283 this->is_buffer_block = var->is_in_buffer_block();
284 this->is_shader_storage = var->is_in_shader_storage_block();
285 if (var->is_interface_instance())
286 program_resource_visitor::process(var->get_interface_type(),
287 var->get_interface_type()->name,
288 use_std430_as_default);
289 else
290 program_resource_visitor::process(var, use_std430_as_default);
291 }
292
293 /**
294 * Total number of active uniforms counted
295 */
296 unsigned num_active_uniforms;
297
298 unsigned num_hidden_uniforms;
299
300 /**
301 * Number of data values required to back the storage for the active uniforms
302 */
303 unsigned num_values;
304
305 /**
306 * Number of samplers used
307 */
308 unsigned num_shader_samplers;
309
310 /**
311 * Number of images used
312 */
313 unsigned num_shader_images;
314
315 /**
316 * Number of uniforms used in the current shader
317     * Number of uniform components used by the current shader
318 unsigned num_shader_uniform_components;
319
320 /**
321 * Number of subroutine uniforms used
322 */
323 unsigned num_shader_subroutines;
324
325 bool is_buffer_block;
326 bool is_shader_storage;
327
328 struct string_to_uint_map *map;
329
330 private:
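   /* Count one leaf uniform: accumulate the per-stage sampler, image,
    * subroutine and uniform-component totals and, if the uniform has not
    * already been seen in another stage, assign it the next index in the
    * hidden or visible map.
    */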
331 virtual void visit_field(const glsl_type *type, const char *name,
332 bool /* row_major */,
333 const glsl_type * /* record_type */,
334 const enum glsl_interface_packing,
335 bool /* last_field */)
336 {
337 assert(!type->without_array()->is_record());
338 assert(!type->without_array()->is_interface());
339 assert(!(type->is_array() && type->fields.array->is_array()));
340
341 /* Count the number of samplers regardless of whether the uniform is
342 * already in the hash table. The hash table prevents adding the same
343 * uniform for multiple shader targets, but in this case we want to
344 * count it for each shader target.
345 */
346 const unsigned values = type->component_slots();
347 if (type->contains_subroutine()) {
348 this->num_shader_subroutines += values;
349 } else if (type->contains_sampler() && !current_var->data.bindless) {
350 /* Samplers (bound or bindless) are counted as two components as
351 * specified by ARB_bindless_texture. */
352 this->num_shader_samplers += values / 2;
353 } else if (type->contains_image() && !current_var->data.bindless) {
354 /* Images (bound or bindless) are counted as two components as
355 * specified by ARB_bindless_texture. */
356 this->num_shader_images += values / 2;
357
358 /* As drivers are likely to represent image uniforms as
359 * scalar indices, count them against the limit of uniform
360 * components in the default block. The spec allows image
361 * uniforms to use up no more than one scalar slot.
362 */
363 if (!is_shader_storage)
364 this->num_shader_uniform_components += values;
365 } else {
366 /* Accumulate the total number of uniform slots used by this shader.
367 * Note that samplers do not count against this limit because they
368 * don't use any storage on current hardware.
369 */
370 if (!is_buffer_block)
371 this->num_shader_uniform_components += values;
372 }
373
374 /* If the uniform is already in the map, there's nothing more to do.
375 */
376 unsigned id;
377 if (this->map->get(id, name))
378 return;
379
380 if (this->current_var->data.how_declared == ir_var_hidden) {
381 this->hidden_map->put(this->num_hidden_uniforms, name);
382 this->num_hidden_uniforms++;
383 } else {
384 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
385 name);
386 }
387
388 /* Each leaf uniform occupies one entry in the list of active
389 * uniforms.
390 */
391 this->num_active_uniforms++;
392
393       if (!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
394 this->num_values += values;
395 }
396
397 struct string_to_uint_map *hidden_map;
398
399 /**
400 * Current variable being processed.
401 */
402 ir_variable *current_var;
403
404 bool use_std430_as_default;
405 };
406
407 } /* anonymous namespace */
408
409 /**
410 * Class to help parcel out pieces of backing storage to uniforms
411 *
412 * Each uniform processed has some range of the \c gl_constant_value
413 * structures associated with it. The association is done by finding
414 * the uniform in the \c string_to_uint_map and using the value from
415 * the map to connect that slot in the \c gl_uniform_storage table
416 * with the next available slot in the \c gl_constant_value array.
417 *
418 * \warning
419 * This class assumes that every uniform that will be processed is
420 * already in the \c string_to_uint_map. In addition, it assumes that
421 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
422 * enough."
423 */
424 class parcel_out_uniform_storage : public program_resource_visitor {
425 public:
426 parcel_out_uniform_storage(struct gl_shader_program *prog,
427 struct string_to_uint_map *map,
428 struct gl_uniform_storage *uniforms,
429 union gl_constant_value *values,
430 bool use_std430_as_default)
431 : prog(prog), map(map), uniforms(uniforms),
432 use_std430_as_default(use_std430_as_default), values(values),
433 bindless_targets(NULL), bindless_access(NULL)
434 {
435 }
436
437 virtual ~parcel_out_uniform_storage()
438 {
439 free(this->bindless_targets);
440 free(this->bindless_access);
441 }
442
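   /* Reset all per-stage state before parceling out the uniforms of the next
    * shader stage.
    */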
443 void start_shader(gl_shader_stage shader_type)
444 {
445 assert(shader_type < MESA_SHADER_STAGES);
446 this->shader_type = shader_type;
447
448 this->shader_samplers_used = 0;
449 this->shader_shadow_samplers = 0;
450 this->next_sampler = 0;
451 this->next_image = 0;
452 this->next_subroutine = 0;
453 this->record_array_count = 1;
454 memset(this->targets, 0, sizeof(this->targets));
455
456 this->num_bindless_samplers = 0;
457 this->next_bindless_sampler = 0;
458 free(this->bindless_targets);
459 this->bindless_targets = NULL;
460
461 this->num_bindless_images = 0;
462 this->next_bindless_image = 0;
463 free(this->bindless_access);
464 this->bindless_access = NULL;
465 }
466
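   /* Parcel out storage for a single uniform variable.  For variables inside
    * uniform or shader storage blocks the matching block index and starting
    * byte offset are looked up first; for default-block uniforms the explicit
    * location is saved so that var->data.location can be reused to hold the
    * uniform storage index.
    */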
467 void set_and_process(ir_variable *var)
468 {
469 current_var = var;
470 field_counter = 0;
471 this->record_next_sampler = new string_to_uint_map;
472 this->record_next_bindless_sampler = new string_to_uint_map;
473 this->record_next_image = new string_to_uint_map;
474 this->record_next_bindless_image = new string_to_uint_map;
475
476 buffer_block_index = -1;
477 if (var->is_in_buffer_block()) {
478 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
479 prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
480 unsigned num_blks = var->is_in_shader_storage_block() ?
481 prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
482
483 if (var->is_interface_instance() && var->type->is_array()) {
484 unsigned l = strlen(var->get_interface_type()->name);
485
486 for (unsigned i = 0; i < num_blks; i++) {
487 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
488 == 0 && blks[i].Name[l] == '[') {
489 buffer_block_index = i;
490 break;
491 }
492 }
493 } else {
494 for (unsigned i = 0; i < num_blks; i++) {
495 if (strcmp(var->get_interface_type()->name, blks[i].Name) == 0) {
496 buffer_block_index = i;
497 break;
498 }
499 }
500 }
501 assert(buffer_block_index != -1);
502
503 /* Uniform blocks that were specified with an instance name must be
504 * handled a little bit differently. The name of the variable is the
505 * name used to reference the uniform block instead of being the name
506 * of a variable within the block. Therefore, searching for the name
507 * within the block will fail.
508 */
509 if (var->is_interface_instance()) {
510 ubo_byte_offset = 0;
511 process(var->get_interface_type(),
512 var->get_interface_type()->name,
513 use_std430_as_default);
514 } else {
515 const struct gl_uniform_block *const block =
516 &blks[buffer_block_index];
517
518 assert(var->data.location != -1);
519
520 const struct gl_uniform_buffer_variable *const ubo_var =
521 &block->Uniforms[var->data.location];
522
523 ubo_byte_offset = ubo_var->Offset;
524 process(var, use_std430_as_default);
525 }
526 } else {
527 /* Store any explicit location and reset data location so we can
528 * reuse this variable for storing the uniform slot number.
529 */
530 this->explicit_location = current_var->data.location;
531 current_var->data.location = -1;
532
533 process(var, use_std430_as_default);
534 }
535 delete this->record_next_sampler;
536 delete this->record_next_bindless_sampler;
537 delete this->record_next_image;
538 delete this->record_next_bindless_image;
539 }
540
541 int buffer_block_index;
542 int ubo_byte_offset;
543 gl_shader_stage shader_type;
544
545 private:
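   /* Assign the opaque (sampler/image) index for a uniform.  For members of
    * struct arrays the next free index is remembered per name in
    * record_next_index so that every array element reserves its own
    * contiguous range.  Returns false if the uniform was already handled by
    * an earlier pass over the struct array.
    */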
546 bool set_opaque_indices(const glsl_type *base_type,
547 struct gl_uniform_storage *uniform,
548 const char *name, unsigned &next_index,
549 struct string_to_uint_map *record_next_index)
550 {
551 assert(base_type->is_sampler() || base_type->is_image());
552
553 if (this->record_array_count > 1) {
554 unsigned inner_array_size = MAX2(1, uniform->array_elements);
555 char *name_copy = ralloc_strdup(NULL, name);
556
557 /* Remove all array subscripts from the sampler/image name */
558 char *str_start;
559 const char *str_end;
560 while((str_start = strchr(name_copy, '[')) &&
561 (str_end = strchr(name_copy, ']'))) {
562 memmove(str_start, str_end + 1, 1 + strlen(str_end + 1));
563 }
564
565 unsigned index = 0;
566 if (record_next_index->get(index, name_copy)) {
567 /* In this case, we've already seen this uniform so we just use the
568 * next sampler/image index recorded the last time we visited.
569 */
570 uniform->opaque[shader_type].index = index;
571 index = inner_array_size + uniform->opaque[shader_type].index;
572 record_next_index->put(index, name_copy);
573
574 ralloc_free(name_copy);
575 /* Return as everything else has already been initialised in a
576 * previous pass.
577 */
578 return false;
579 } else {
580 /* We've never seen this uniform before so we need to allocate
581 * enough indices to store it.
582 *
583 * Nested struct arrays behave like arrays of arrays so we need to
584 * increase the index by the total number of elements of the
585 * sampler/image in case there is more than one sampler/image
586 * inside the structs. This allows the offset to be easily
587 * calculated for indirect indexing.
588 */
589 uniform->opaque[shader_type].index = next_index;
590 next_index += inner_array_size * this->record_array_count;
591
592 /* Store the next index for future passes over the struct array
593 */
594 index = uniform->opaque[shader_type].index + inner_array_size;
595 record_next_index->put(index, name_copy);
596 ralloc_free(name_copy);
597 }
598 } else {
599 /* Increment the sampler/image by 1 for non-arrays and by the number
600 * of array elements for arrays.
601 */
602 uniform->opaque[shader_type].index = next_index;
603 next_index += MAX2(1, uniform->array_elements);
604 }
605 return true;
606 }
607
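   /* Assign sampler indices and record the texture target, used-sampler and
    * shadow-sampler masks (bound samplers) or the per-index texture targets
    * (bindless samplers).
    */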
608 void handle_samplers(const glsl_type *base_type,
609 struct gl_uniform_storage *uniform, const char *name)
610 {
611 if (base_type->is_sampler()) {
612 uniform->opaque[shader_type].active = true;
613
614 const gl_texture_index target = base_type->sampler_index();
615 const unsigned shadow = base_type->sampler_shadow;
616
617 if (current_var->data.bindless) {
618 if (!set_opaque_indices(base_type, uniform, name,
619 this->next_bindless_sampler,
620 this->record_next_bindless_sampler))
621 return;
622
623 this->num_bindless_samplers = this->next_bindless_sampler;
624
625 this->bindless_targets = (gl_texture_index *)
626 realloc(this->bindless_targets,
627 this->num_bindless_samplers * sizeof(gl_texture_index));
628
629 for (unsigned i = uniform->opaque[shader_type].index;
630 i < this->num_bindless_samplers;
631 i++) {
632 this->bindless_targets[i] = target;
633 }
634 } else {
635 if (!set_opaque_indices(base_type, uniform, name,
636 this->next_sampler,
637 this->record_next_sampler))
638 return;
639
640 for (unsigned i = uniform->opaque[shader_type].index;
641 i < MIN2(this->next_sampler, MAX_SAMPLERS);
642 i++) {
643 this->targets[i] = target;
644 this->shader_samplers_used |= 1U << i;
645 this->shader_shadow_samplers |= shadow << i;
646 }
647 }
648 }
649 }
650
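   /* Assign image indices and record the access qualifier, either in the
    * program's ImageAccess array (bound images) or in bindless_access
    * (bindless images).
    */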
651 void handle_images(const glsl_type *base_type,
652 struct gl_uniform_storage *uniform, const char *name)
653 {
654 if (base_type->is_image()) {
655 uniform->opaque[shader_type].active = true;
656
657 /* Set image access qualifiers */
658 const GLenum access =
659 (current_var->data.memory_read_only ? GL_READ_ONLY :
660 current_var->data.memory_write_only ? GL_WRITE_ONLY :
661 GL_READ_WRITE);
662
663 if (current_var->data.bindless) {
664 if (!set_opaque_indices(base_type, uniform, name,
665 this->next_bindless_image,
666 this->record_next_bindless_image))
667 return;
668
669 this->num_bindless_images = this->next_bindless_image;
670
671 this->bindless_access = (GLenum *)
672 realloc(this->bindless_access,
673 this->num_bindless_images * sizeof(GLenum));
674
675 for (unsigned i = uniform->opaque[shader_type].index;
676 i < this->num_bindless_images;
677 i++) {
678 this->bindless_access[i] = access;
679 }
680 } else {
681 if (!set_opaque_indices(base_type, uniform, name,
682 this->next_image,
683 this->record_next_image))
684 return;
685
686 for (unsigned i = uniform->opaque[shader_type].index;
687 i < MIN2(this->next_image, MAX_IMAGE_UNIFORMS);
688 i++) {
689 prog->_LinkedShaders[shader_type]->Program->sh.ImageAccess[i] = access;
690 }
691 }
692 }
693 }
694
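   /* Assign subroutine uniform indices and bump the per-stage count of
    * subroutine uniforms.
    */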
695 void handle_subroutines(const glsl_type *base_type,
696 struct gl_uniform_storage *uniform)
697 {
698 if (base_type->is_subroutine()) {
699 uniform->opaque[shader_type].index = this->next_subroutine;
700 uniform->opaque[shader_type].active = true;
701
702 prog->_LinkedShaders[shader_type]->Program->sh.NumSubroutineUniforms++;
703
704 /* Increment the subroutine index by 1 for non-arrays and by the
705 * number of array elements for arrays.
706 */
707 this->next_subroutine += MAX2(1, uniform->array_elements);
708
709 }
710 }
711
712 virtual void set_buffer_offset(unsigned offset)
713 {
714 this->ubo_byte_offset = offset;
715 }
716
717 virtual void set_record_array_count(unsigned record_array_count)
718 {
719 this->record_array_count = record_array_count;
720 }
721
722 virtual void enter_record(const glsl_type *type, const char *,
723 bool row_major,
724 const enum glsl_interface_packing packing)
725 {
726 assert(type->is_record());
727 if (this->buffer_block_index == -1)
728 return;
729 if (packing == GLSL_INTERFACE_PACKING_STD430)
730 this->ubo_byte_offset = glsl_align(
731 this->ubo_byte_offset, type->std430_base_alignment(row_major));
732 else
733 this->ubo_byte_offset = glsl_align(
734 this->ubo_byte_offset, type->std140_base_alignment(row_major));
735 }
736
737 virtual void leave_record(const glsl_type *type, const char *,
738 bool row_major,
739 const enum glsl_interface_packing packing)
740 {
741 assert(type->is_record());
742 if (this->buffer_block_index == -1)
743 return;
744 if (packing == GLSL_INTERFACE_PACKING_STD430)
745 this->ubo_byte_offset = glsl_align(
746 this->ubo_byte_offset, type->std430_base_alignment(row_major));
747 else
748 this->ubo_byte_offset = glsl_align(
749 this->ubo_byte_offset, type->std140_base_alignment(row_major));
750 }
751
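   /* Fill in the gl_uniform_storage entry for one leaf uniform: array size,
    * opaque indices, remap/location bookkeeping and, for block members, the
    * std140/std430 offset, array stride and matrix stride.
    */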
752 virtual void visit_field(const glsl_type *type, const char *name,
753 bool row_major, const glsl_type * /* record_type */,
754 const enum glsl_interface_packing packing,
755 bool /* last_field */)
756 {
757 assert(!type->without_array()->is_record());
758 assert(!type->without_array()->is_interface());
759 assert(!(type->is_array() && type->fields.array->is_array()));
760
761 unsigned id;
762 bool found = this->map->get(id, name);
763 assert(found);
764
765 if (!found)
766 return;
767
768 const glsl_type *base_type;
769 if (type->is_array()) {
770 this->uniforms[id].array_elements = type->length;
771 base_type = type->fields.array;
772 } else {
773 this->uniforms[id].array_elements = 0;
774 base_type = type;
775 }
776
777 /* Initialise opaque data */
778 this->uniforms[id].opaque[shader_type].index = ~0;
779 this->uniforms[id].opaque[shader_type].active = false;
780
781 this->uniforms[id].active_shader_mask |= 1 << shader_type;
782
783 /* This assigns uniform indices to sampler and image uniforms. */
784 handle_samplers(base_type, &this->uniforms[id], name);
785 handle_images(base_type, &this->uniforms[id], name);
786 handle_subroutines(base_type, &this->uniforms[id]);
787
788       /* For arrays of arrays or struct arrays the base location may have
789 * already been set so don't set it again.
790 */
791 if (buffer_block_index == -1 && current_var->data.location == -1) {
792 current_var->data.location = id;
793 }
794
795 /* If there is already storage associated with this uniform or if the
796 * uniform is set as builtin, it means that it was set while processing
797 * an earlier shader stage. For example, we may be processing the
798 * uniform in the fragment shader, but the uniform was already processed
799 * in the vertex shader.
800 */
801 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
802 return;
803 }
804
805 /* Assign explicit locations. */
806 if (current_var->data.explicit_location) {
807 /* Set sequential locations for struct fields. */
808 if (current_var->type->without_array()->is_record() ||
809 current_var->type->is_array_of_arrays()) {
810 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
811 this->uniforms[id].remap_location =
812 this->explicit_location + field_counter;
813 field_counter += entries;
814 } else {
815 this->uniforms[id].remap_location = this->explicit_location;
816 }
817 } else {
818          /* Initialize to indicate that no location is set */
819 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
820 }
821
822 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
823 this->uniforms[id].type = base_type;
824 this->uniforms[id].num_driver_storage = 0;
825 this->uniforms[id].driver_storage = NULL;
826 this->uniforms[id].atomic_buffer_index = -1;
827 this->uniforms[id].hidden =
828 current_var->data.how_declared == ir_var_hidden;
829 this->uniforms[id].builtin = is_gl_identifier(name);
830
831 this->uniforms[id].is_shader_storage =
832 current_var->is_in_shader_storage_block();
833 this->uniforms[id].is_bindless = current_var->data.bindless;
834
835 /* Do not assign storage if the uniform is a builtin or buffer object */
836 if (!this->uniforms[id].builtin &&
837 !this->uniforms[id].is_shader_storage &&
838 this->buffer_block_index == -1)
839 this->uniforms[id].storage = this->values;
840
841 if (this->buffer_block_index != -1) {
842 this->uniforms[id].block_index = this->buffer_block_index;
843
844 unsigned alignment = type->std140_base_alignment(row_major);
845 if (packing == GLSL_INTERFACE_PACKING_STD430)
846 alignment = type->std430_base_alignment(row_major);
847 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
848 this->uniforms[id].offset = this->ubo_byte_offset;
849 if (packing == GLSL_INTERFACE_PACKING_STD430)
850 this->ubo_byte_offset += type->std430_size(row_major);
851 else
852 this->ubo_byte_offset += type->std140_size(row_major);
853
854 if (type->is_array()) {
855 if (packing == GLSL_INTERFACE_PACKING_STD430)
856 this->uniforms[id].array_stride =
857 type->without_array()->std430_array_stride(row_major);
858 else
859 this->uniforms[id].array_stride =
860 glsl_align(type->without_array()->std140_size(row_major),
861 16);
862 } else {
863 this->uniforms[id].array_stride = 0;
864 }
865
866 if (type->without_array()->is_matrix()) {
867 const glsl_type *matrix = type->without_array();
868 const unsigned N = matrix->is_double() ? 8 : 4;
869 const unsigned items =
870 row_major ? matrix->matrix_columns : matrix->vector_elements;
871
872 assert(items <= 4);
873 if (packing == GLSL_INTERFACE_PACKING_STD430)
874 this->uniforms[id].matrix_stride = items < 3 ? items * N :
875 glsl_align(items * N, 16);
876 else
877 this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
878 this->uniforms[id].row_major = row_major;
879 } else {
880 this->uniforms[id].matrix_stride = 0;
881 this->uniforms[id].row_major = false;
882 }
883 } else {
884 this->uniforms[id].block_index = -1;
885 this->uniforms[id].offset = -1;
886 this->uniforms[id].array_stride = -1;
887 this->uniforms[id].matrix_stride = -1;
888 this->uniforms[id].row_major = false;
889 }
890
891 if (!this->uniforms[id].builtin &&
892 !this->uniforms[id].is_shader_storage &&
893 this->buffer_block_index == -1)
894 this->values += type->component_slots();
895 }
896
897 /**
898 * Current program being processed.
899 */
900 struct gl_shader_program *prog;
901
902 struct string_to_uint_map *map;
903
904 struct gl_uniform_storage *uniforms;
905 unsigned next_sampler;
906 unsigned next_bindless_sampler;
907 unsigned next_image;
908 unsigned next_bindless_image;
909 unsigned next_subroutine;
910
911 bool use_std430_as_default;
912
913 /**
914     * Field counter is used to ensure that uniform structures
915 * with explicit locations get sequential locations.
916 */
917 unsigned field_counter;
918
919 /**
920 * Current variable being processed.
921 */
922 ir_variable *current_var;
923
924 /* Used to store the explicit location from current_var so that we can
925 * reuse the location field for storing the uniform slot id.
926 */
927 int explicit_location;
928
929 /* Stores total struct array elements including nested structs */
930 unsigned record_array_count;
931
932 /* Map for temporarily storing next sampler index when handling samplers in
933 * struct arrays.
934 */
935 struct string_to_uint_map *record_next_sampler;
936
937    /* Map for temporarily storing next image index when handling images in
938 * struct arrays.
939 */
940 struct string_to_uint_map *record_next_image;
941
942 /* Map for temporarily storing next bindless sampler index when handling
943 * bindless samplers in struct arrays.
944 */
945 struct string_to_uint_map *record_next_bindless_sampler;
946
947 /* Map for temporarily storing next bindless image index when handling
948 * bindless images in struct arrays.
949 */
950 struct string_to_uint_map *record_next_bindless_image;
951
952 public:
953 union gl_constant_value *values;
954
955 gl_texture_index targets[MAX_SAMPLERS];
956
957 /**
958 * Mask of samplers used by the current shader stage.
959 */
960 unsigned shader_samplers_used;
961
962 /**
963 * Mask of samplers used by the current shader stage for shadows.
964 */
965 unsigned shader_shadow_samplers;
966
967 /**
968 * Number of bindless samplers used by the current shader stage.
969 */
970 unsigned num_bindless_samplers;
971
972 /**
973 * Texture targets for bindless samplers used by the current stage.
974 */
975 gl_texture_index *bindless_targets;
976
977 /**
978 * Number of bindless images used by the current shader stage.
979 */
980 unsigned num_bindless_images;
981
982 /**
983 * Access types for bindless images used by the current stage.
984 */
985 GLenum *bindless_access;
986
987 };
988
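/* Check whether the array-refcount visitor found any reference to this
 * variable in the shader IR.
 */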
989 static bool
990 variable_is_referenced(ir_array_refcount_visitor &v, ir_variable *var)
991 {
992 ir_array_refcount_entry *const entry = v.get_variable_entry(var);
993
994 return entry->is_referenced;
995
996 }
997
998 /**
999  * Walks the IR and updates the references to uniform blocks in the
1000  * ir_variables to point at the linked shader's list (previously, they
1001 * would point at the uniform block list in one of the pre-linked
1002 * shaders).
1003 */
1004 static void
1005 link_update_uniform_buffer_variables(struct gl_linked_shader *shader,
1006 unsigned stage)
1007 {
1008 ir_array_refcount_visitor v;
1009
1010 v.run(shader->ir);
1011
1012 foreach_in_list(ir_instruction, node, shader->ir) {
1013 ir_variable *const var = node->as_variable();
1014
1015 if (var == NULL || !var->is_in_buffer_block())
1016 continue;
1017
1018 assert(var->data.mode == ir_var_uniform ||
1019 var->data.mode == ir_var_shader_storage);
1020
1021 unsigned num_blocks = var->data.mode == ir_var_uniform ?
1022 shader->Program->info.num_ubos : shader->Program->info.num_ssbos;
1023 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
1024 shader->Program->sh.UniformBlocks :
1025 shader->Program->sh.ShaderStorageBlocks;
1026
1027 if (var->is_interface_instance()) {
1028 const ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1029
1030 if (entry->is_referenced) {
1031 /* Since this is an interface instance, the instance type will be
1032             * the same as the array-stripped variable type. If the variable type
1033 * is an array, then the block names will be suffixed with [0]
1034 * through [n-1]. Unlike for non-interface instances, there will
1035 * not be structure types here, so the only name sentinel that we
1036 * have to worry about is [.
1037 */
1038 assert(var->type->without_array() == var->get_interface_type());
1039 const char sentinel = var->type->is_array() ? '[' : '\0';
1040
1041 const ptrdiff_t len = strlen(var->get_interface_type()->name);
1042 for (unsigned i = 0; i < num_blocks; i++) {
1043 const char *const begin = blks[i]->Name;
1044 const char *const end = strchr(begin, sentinel);
1045
1046 if (end == NULL)
1047 continue;
1048
1049 if (len != (end - begin))
1050 continue;
1051
1052 /* Even when a match is found, do not "break" here. This could
1053 * be an array of instances, and all elements of the array need
1054 * to be marked as referenced.
1055 */
1056 if (strncmp(begin, var->get_interface_type()->name, len) == 0 &&
1057 (!var->type->is_array() ||
1058 entry->is_linearized_index_referenced(blks[i]->linearized_array_index))) {
1059 blks[i]->stageref |= 1U << stage;
1060 }
1061 }
1062 }
1063
1064 var->data.location = 0;
1065 continue;
1066 }
1067
1068 bool found = false;
1069 char sentinel = '\0';
1070
1071 if (var->type->is_record()) {
1072 sentinel = '.';
1073 } else if (var->type->is_array() && (var->type->fields.array->is_array()
1074 || var->type->without_array()->is_record())) {
1075 sentinel = '[';
1076 }
1077
1078 const unsigned l = strlen(var->name);
1079 for (unsigned i = 0; i < num_blocks; i++) {
1080 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
1081 if (sentinel) {
1082 const char *begin = blks[i]->Uniforms[j].Name;
1083 const char *end = strchr(begin, sentinel);
1084
1085 if (end == NULL)
1086 continue;
1087
1088 if ((ptrdiff_t) l != (end - begin))
1089 continue;
1090
1091 found = strncmp(var->name, begin, l) == 0;
1092 } else {
1093 found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
1094 }
1095
1096 if (found) {
1097 var->data.location = j;
1098
1099 if (variable_is_referenced(v, var))
1100 blks[i]->stageref |= 1U << stage;
1101
1102 break;
1103 }
1104 }
1105
1106 if (found)
1107 break;
1108 }
1109 assert(found);
1110 }
1111 }
1112
1113 /**
1114 * Combine the hidden uniform hash map with the uniform hash map so that the
1115  * hidden uniforms will be given indices at the end of the uniform storage
1116 * array.
1117 */
1118 static void
1119 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
1120 void *closure)
1121 {
1122 count_uniform_size *uniform_size = (count_uniform_size *) closure;
1123 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
1124 uniform_size->num_hidden_uniforms;
1125
1126 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
1127 }
1128
1129 /**
1130 * Search through the list of empty blocks to find one that fits the current
1131 * uniform.
1132 */
1133 static int
1134 find_empty_block(struct gl_shader_program *prog,
1135 struct gl_uniform_storage *uniform)
1136 {
1137 const unsigned entries = MAX2(1, uniform->array_elements);
1138
1139 foreach_list_typed(struct empty_uniform_block, block, link,
1140 &prog->EmptyUniformLocations) {
1141 /* Found a block with enough slots to fit the uniform */
1142 if (block->slots == entries) {
1143 unsigned start = block->start;
1144 exec_node_remove(&block->link);
1145 ralloc_free(block);
1146
1147 return start;
1148 /* Found a block with more slots than needed. It can still be used. */
1149 } else if (block->slots > entries) {
1150 unsigned start = block->start;
1151 block->start += entries;
1152 block->slots -= entries;
1153
1154 return start;
1155 }
1156 }
1157
1158 return -1;
1159 }
1160
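/* Build prog->UniformRemapTable and the per-stage subroutine remap tables.
 * Explicit locations are reserved first, the remaining uniforms are packed
 * into empty blocks or appended at the end, and the total number of
 * locations is checked against the driver limit.
 */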
1161 static void
1162 link_setup_uniform_remap_tables(struct gl_context *ctx,
1163 struct gl_shader_program *prog)
1164 {
1165 unsigned total_entries = prog->NumExplicitUniformLocations;
1166 unsigned empty_locs = prog->NumUniformRemapTable - total_entries;
1167
1168 /* Reserve all the explicit locations of the active uniforms. */
1169 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1170 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1171 prog->data->UniformStorage[i].is_shader_storage)
1172 continue;
1173
1174 if (prog->data->UniformStorage[i].remap_location !=
1175 UNMAPPED_UNIFORM_LOC) {
1176 /* How many new entries for this uniform? */
1177 const unsigned entries =
1178 MAX2(1, prog->data->UniformStorage[i].array_elements);
1179
1180          /* Set remap table entries to point to the correct gl_uniform_storage. */
1181 for (unsigned j = 0; j < entries; j++) {
1182 unsigned element_loc =
1183 prog->data->UniformStorage[i].remap_location + j;
1184 assert(prog->UniformRemapTable[element_loc] ==
1185 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1186 prog->UniformRemapTable[element_loc] =
1187 &prog->data->UniformStorage[i];
1188 }
1189 }
1190 }
1191
1192    /* Reserve locations for the rest of the uniforms. */
1193 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1194
1195 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1196 prog->data->UniformStorage[i].is_shader_storage)
1197 continue;
1198
1199 /* Built-in uniforms should not get any location. */
1200 if (prog->data->UniformStorage[i].builtin)
1201 continue;
1202
1203 /* Explicit ones have been set already. */
1204 if (prog->data->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1205 continue;
1206
1207 /* how many new entries for this uniform? */
1208 const unsigned entries =
1209 MAX2(1, prog->data->UniformStorage[i].array_elements);
1210
1211       /* Find an empty block in the UniformRemapTable that can fit this uniform. */
1212 int chosen_location = -1;
1213
1214 if (empty_locs)
1215 chosen_location = find_empty_block(prog, &prog->data->UniformStorage[i]);
1216
1217 /* Add new entries to the total amount of entries. */
1218 total_entries += entries;
1219
1220 if (chosen_location != -1) {
1221 empty_locs -= entries;
1222 } else {
1223 chosen_location = prog->NumUniformRemapTable;
1224
1225 /* resize remap table to fit new entries */
1226 prog->UniformRemapTable =
1227 reralloc(prog,
1228 prog->UniformRemapTable,
1229 gl_uniform_storage *,
1230 prog->NumUniformRemapTable + entries);
1231 prog->NumUniformRemapTable += entries;
1232 }
1233
1234 /* set pointers for this uniform */
1235 for (unsigned j = 0; j < entries; j++)
1236 prog->UniformRemapTable[chosen_location + j] =
1237 &prog->data->UniformStorage[i];
1238
1239 /* set the base location in remap table for the uniform */
1240 prog->data->UniformStorage[i].remap_location = chosen_location;
1241 }
1242
1243    /* Verify that the total number of entries for explicit and implicit locations
1244     * does not exceed MAX_UNIFORM_LOCATIONS.
1245 */
1246
1247 if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
1248       linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
1249 "(%u > %u)", total_entries,
1250 ctx->Const.MaxUserAssignableUniformLocations);
1251 }
1252
1253 /* Reserve all the explicit locations of the active subroutine uniforms. */
1254 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1255 if (!prog->data->UniformStorage[i].type->is_subroutine())
1256 continue;
1257
1258 if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
1259 continue;
1260
1261 /* How many new entries for this uniform? */
1262 const unsigned entries =
1263 MAX2(1, prog->data->UniformStorage[i].array_elements);
1264
1265 unsigned mask = prog->data->linked_stages;
1266 while (mask) {
1267 const int j = u_bit_scan(&mask);
1268 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1269
1270 if (!prog->data->UniformStorage[i].opaque[j].active)
1271 continue;
1272
1273          /* Set remap table entries to point to the correct gl_uniform_storage. */
1274 for (unsigned k = 0; k < entries; k++) {
1275 unsigned element_loc =
1276 prog->data->UniformStorage[i].remap_location + k;
1277 assert(p->sh.SubroutineUniformRemapTable[element_loc] ==
1278 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1279 p->sh.SubroutineUniformRemapTable[element_loc] =
1280 &prog->data->UniformStorage[i];
1281 }
1282 }
1283 }
1284
1285 /* reserve subroutine locations */
1286 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1287 if (!prog->data->UniformStorage[i].type->is_subroutine())
1288 continue;
1289
1290 if (prog->data->UniformStorage[i].remap_location !=
1291 UNMAPPED_UNIFORM_LOC)
1292 continue;
1293
1294 const unsigned entries =
1295 MAX2(1, prog->data->UniformStorage[i].array_elements);
1296
1297 unsigned mask = prog->data->linked_stages;
1298 while (mask) {
1299 const int j = u_bit_scan(&mask);
1300 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1301
1302 if (!prog->data->UniformStorage[i].opaque[j].active)
1303 continue;
1304
1305 p->sh.SubroutineUniformRemapTable =
1306 reralloc(p,
1307 p->sh.SubroutineUniformRemapTable,
1308 gl_uniform_storage *,
1309 p->sh.NumSubroutineUniformRemapTable + entries);
1310
1311 for (unsigned k = 0; k < entries; k++) {
1312 p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
1313 &prog->data->UniformStorage[i];
1314 }
1315 prog->data->UniformStorage[i].remap_location =
1316 p->sh.NumSubroutineUniformRemapTable;
1317 p->sh.NumSubroutineUniformRemapTable += entries;
1318 }
1319 }
1320 }
1321
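/* Allocate the gl_uniform_storage array and its backing gl_constant_value
 * slots (unless they already exist), parcel storage out to every linked
 * stage, build the remap tables and apply the uniform initializers.
 */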
1322 static void
1323 link_assign_uniform_storage(struct gl_context *ctx,
1324 struct gl_shader_program *prog,
1325 const unsigned num_data_slots)
1326 {
1327 /* On the outside chance that there were no uniforms, bail out.
1328 */
1329 if (prog->data->NumUniformStorage == 0)
1330 return;
1331
1332 unsigned int boolean_true = ctx->Const.UniformBooleanTrue;
1333
1334 union gl_constant_value *data;
1335 if (prog->data->UniformStorage == NULL) {
1336 prog->data->UniformStorage = rzalloc_array(prog,
1337 struct gl_uniform_storage,
1338 prog->data->NumUniformStorage);
1339 data = rzalloc_array(prog->data->UniformStorage,
1340 union gl_constant_value, num_data_slots);
1341 } else {
1342 data = prog->data->UniformDataSlots;
1343 }
1344
1345 #ifndef NDEBUG
1346 union gl_constant_value *data_end = &data[num_data_slots];
1347 #endif
1348
1349 parcel_out_uniform_storage parcel(prog, prog->UniformHash,
1350 prog->data->UniformStorage, data,
1351 ctx->Const.UseSTD430AsDefaultPacking);
1352
1353 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1354 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
1355
1356 if (!shader)
1357 continue;
1358
1359 parcel.start_shader((gl_shader_stage)i);
1360
1361 foreach_in_list(ir_instruction, node, shader->ir) {
1362 ir_variable *const var = node->as_variable();
1363
1364 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1365 var->data.mode != ir_var_shader_storage))
1366 continue;
1367
1368 parcel.set_and_process(var);
1369 }
1370
1371 shader->Program->SamplersUsed = parcel.shader_samplers_used;
1372 shader->shadow_samplers = parcel.shader_shadow_samplers;
1373
1374 if (parcel.num_bindless_samplers > 0) {
1375 shader->Program->sh.NumBindlessSamplers = parcel.num_bindless_samplers;
1376 shader->Program->sh.BindlessSamplers =
1377 rzalloc_array(shader->Program, gl_bindless_sampler,
1378 parcel.num_bindless_samplers);
1379 for (unsigned j = 0; j < parcel.num_bindless_samplers; j++) {
1380 shader->Program->sh.BindlessSamplers[j].target =
1381 parcel.bindless_targets[j];
1382 }
1383 }
1384
1385 if (parcel.num_bindless_images > 0) {
1386 shader->Program->sh.NumBindlessImages = parcel.num_bindless_images;
1387 shader->Program->sh.BindlessImages =
1388 rzalloc_array(shader->Program, gl_bindless_image,
1389 parcel.num_bindless_images);
1390 for (unsigned j = 0; j < parcel.num_bindless_images; j++) {
1391 shader->Program->sh.BindlessImages[j].access =
1392 parcel.bindless_access[j];
1393 }
1394 }
1395
1396 STATIC_ASSERT(sizeof(shader->Program->sh.SamplerTargets) ==
1397 sizeof(parcel.targets));
1398 memcpy(shader->Program->sh.SamplerTargets,
1399 parcel.targets,
1400 sizeof(shader->Program->sh.SamplerTargets));
1401 }
1402
1403 #ifndef NDEBUG
1404 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1405 assert(prog->data->UniformStorage[i].storage != NULL ||
1406 prog->data->UniformStorage[i].builtin ||
1407 prog->data->UniformStorage[i].is_shader_storage ||
1408 prog->data->UniformStorage[i].block_index != -1);
1409 }
1410
1411 assert(parcel.values == data_end);
1412 #endif
1413
1414 link_setup_uniform_remap_tables(ctx, prog);
1415
1416 /* Set shader cache fields */
1417 prog->data->NumUniformDataSlots = num_data_slots;
1418 prog->data->UniformDataSlots = data;
1419
1420 link_set_uniform_initializers(prog, boolean_true);
1421 }
1422
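/* Top-level entry point used by the linker.  Counts the active uniforms of
 * every linked stage, assigning each one an index in prog->UniformHash, and
 * then hands the totals to link_assign_uniform_storage().
 */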
1423 void
1424 link_assign_uniform_locations(struct gl_shader_program *prog,
1425 struct gl_context *ctx)
1426 {
1427 ralloc_free(prog->data->UniformStorage);
1428 prog->data->UniformStorage = NULL;
1429 prog->data->NumUniformStorage = 0;
1430
1431 if (prog->UniformHash != NULL) {
1432 prog->UniformHash->clear();
1433 } else {
1434 prog->UniformHash = new string_to_uint_map;
1435 }
1436
1437 /* First pass: Count the uniform resources used by the user-defined
1438 * uniforms. While this happens, each active uniform will have an index
1439 * assigned to it.
1440 *
1441 * Note: this is *NOT* the index that is returned to the application by
1442 * glGetUniformLocation.
1443 */
1444 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1445 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms,
1446 ctx->Const.UseSTD430AsDefaultPacking);
1447 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1448 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1449
1450 if (sh == NULL)
1451 continue;
1452
1453 link_update_uniform_buffer_variables(sh, i);
1454
1455 /* Reset various per-shader target counts.
1456 */
1457 uniform_size.start_shader();
1458
1459 foreach_in_list(ir_instruction, node, sh->ir) {
1460 ir_variable *const var = node->as_variable();
1461
1462 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1463 var->data.mode != ir_var_shader_storage))
1464 continue;
1465
1466 uniform_size.process(var);
1467 }
1468
1469 sh->Program->info.num_textures = uniform_size.num_shader_samplers;
1470 sh->Program->info.num_images = uniform_size.num_shader_images;
1471 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1472 sh->num_combined_uniform_components = sh->num_uniform_components;
1473
1474 for (unsigned i = 0; i < sh->Program->info.num_ubos; i++) {
1475 sh->num_combined_uniform_components +=
1476 sh->Program->sh.UniformBlocks[i]->UniformBufferSize / 4;
1477 }
1478 }
1479
1480 prog->data->NumUniformStorage = uniform_size.num_active_uniforms;
1481 prog->data->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
1482
1483 /* assign hidden uniforms a slot id */
1484 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1485 delete hiddenUniforms;
1486
1487 link_assign_uniform_storage(ctx, prog, uniform_size.num_values);
1488 }