/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "main/core.h"
#include "ir.h"
#include "linker.h"
#include "ir_uniform.h"
#include "glsl_symbol_table.h"
#include "program/hash_table.h"
#include "program.h"

/**
 * \file link_uniforms.cpp
 * Assign locations for GLSL uniforms.
 *
 * \author Ian Romanick <ian.d.romanick@intel.com>
 */

/**
 * Used by linker to indicate uniforms that have no location set.
 */
#define UNMAPPED_UNIFORM_LOC ~0u

/**
 * Count the backing storage requirements for a type
 */
static unsigned
values_for_type(const glsl_type *type)
{
   if (type->is_sampler()) {
      return 1;
   } else if (type->is_array() && type->fields.array->is_sampler()) {
      return type->array_size();
   } else {
      return type->component_slots();
   }
}
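
/* For example, under these rules a "uniform vec4 v" needs 4 backing values
 * and a "uniform mat3 m" needs 9, while a lone "uniform sampler2D s" needs
 * only 1 and a "uniform sampler2D s[3]" needs 3 (one per array element).
 */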

void
program_resource_visitor::process(const glsl_type *type, const char *name)
{
   assert(type->without_array()->is_record()
          || type->without_array()->is_interface());

   char *name_copy = ralloc_strdup(NULL, name);
   recursion(type, &name_copy, strlen(name), false, NULL, false);
   ralloc_free(name_copy);
}

void
program_resource_visitor::process(ir_variable *var)
{
   const glsl_type *t = var->type;
   const bool row_major =
      var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;

   /* false is always passed for the row_major parameter to the other
    * processing functions because no information is available to do
    * otherwise.  See the warning in linker.h.
    */

   /* Only strdup the name if we actually will need to modify it. */
   if (var->data.from_named_ifc_block_array) {
      /* lower_named_interface_blocks created this variable by lowering an
       * interface block array to an array variable.  For example if the
       * original source code was:
       *
       *     out Blk { vec4 bar } foo[3];
       *
       * Then the variable is now:
       *
       *     out vec4 bar[3];
       *
       * We need to visit each array element using the names constructed like
       * so:
       *
       *     Blk[0].bar
       *     Blk[1].bar
       *     Blk[2].bar
       */
      assert(t->is_array());
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_strdup(NULL, ifc_type->name);
      size_t name_length = strlen(name);
      for (unsigned i = 0; i < t->length; i++) {
         size_t new_length = name_length;
         ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i,
                                      var->name);
         /* Note: row_major is only meaningful for uniform blocks, and
          * lowering is only applied to non-uniform interface blocks, so we
          * can safely pass false for row_major.
          */
         recursion(var->type, &name, new_length, row_major, NULL, false);
      }
      ralloc_free(name);
   } else if (var->data.from_named_ifc_block_nonarray) {
      /* lower_named_interface_blocks created this variable by lowering a
       * named interface block (non-array) to an ordinary variable.  For
       * example if the original source code was:
       *
       *     out Blk { vec4 bar } foo;
       *
       * Then the variable is now:
       *
       *     out vec4 bar;
       *
       * We need to visit this variable using the name:
       *
       *     Blk.bar
       */
      const glsl_type *ifc_type = var->get_interface_type();
      char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name);
      /* Note: row_major is only meaningful for uniform blocks, and lowering
       * is only applied to non-uniform interface blocks, so we can safely
       * pass false for row_major.
       */
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->without_array()->is_record()) {
      char *name = ralloc_strdup(NULL, var->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else if (t->is_array() && t->fields.array->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->fields.array->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, false);
      ralloc_free(name);
   } else {
      this->visit_field(t, var->name, row_major, NULL, false);
   }
}

void
program_resource_visitor::recursion(const glsl_type *t, char **name,
                                    size_t name_length, bool row_major,
                                    const glsl_type *record_type,
                                    bool last_field)
{
   /* Records need to have each field processed individually.
    *
    * Arrays of records need to have each array element processed
    * individually, then each field of the resulting array elements processed
    * individually.
    */
   if (t->is_record() || t->is_interface()) {
      if (record_type == NULL && t->is_record())
         record_type = t;

      if (t->is_record())
         this->enter_record(t, *name, row_major);

      for (unsigned i = 0; i < t->length; i++) {
         const char *field = t->fields.structure[i].name;
         size_t new_length = name_length;

         if (t->fields.structure[i].type->is_record())
            this->visit_field(&t->fields.structure[i]);

         /* Append '.field' to the current variable name. */
         if (name_length == 0) {
            ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
         } else {
            ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
         }

         /* The layout of structures at the top level of the block is set
          * during parsing.  For matrices contained in multiple levels of
          * structures in the block, the inner structures have no layout.
          * These cases must potentially inherit the layout from the outer
          * levels.
          */
         bool field_row_major = row_major;
         const enum glsl_matrix_layout matrix_layout =
            glsl_matrix_layout(t->fields.structure[i].matrix_layout);
         if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
            field_row_major = true;
         } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
            field_row_major = false;
         }

         recursion(t->fields.structure[i].type, name, new_length,
                   field_row_major,
                   record_type,
                   (i + 1) == t->length);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }

      if (t->is_record()) {
         (*name)[name_length] = '\0';
         this->leave_record(t, *name, row_major);
      }
   } else if (t->is_array() && (t->fields.array->is_record()
                                || t->fields.array->is_interface())) {
      if (record_type == NULL && t->fields.array->is_record())
         record_type = t->fields.array;

      for (unsigned i = 0; i < t->length; i++) {
         size_t new_length = name_length;

         /* Append the subscript to the current variable name */
         ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);

         recursion(t->fields.array, name, new_length, row_major,
                   record_type,
                   (i + 1) == t->length);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else {
      this->visit_field(t, *name, row_major, record_type, last_field);
   }
}
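
/* As an illustration, a hypothetical declaration such as
 *
 *     uniform struct { vec4 v; mat3 m; } s[2];
 *
 * is walked one array element and one field at a time, so visit_field()
 * sees the leaf names
 *
 *     s[0].v, s[0].m, s[1].v, s[1].m
 *
 * and the record type pointer is passed only for the very first leaf.
 */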

void
program_resource_visitor::visit_field(const glsl_type *type, const char *name,
                                      bool row_major,
                                      const glsl_type *,
                                      bool /* last_field */)
{
   visit_field(type, name, row_major);
}

void
program_resource_visitor::visit_field(const glsl_struct_field *field)
{
   (void) field;
   /* empty */
}

void
program_resource_visitor::enter_record(const glsl_type *, const char *, bool)
{
}

void
program_resource_visitor::leave_record(const glsl_type *, const char *, bool)
{
}

namespace {

/**
 * Class to help calculate the storage requirements for a set of uniforms
 *
 * As uniforms are added to the active set the number of active uniforms and
 * the storage requirements for those uniforms are accumulated.  The active
 * uniforms are added to the hash table supplied to the constructor.
 *
 * If the same uniform is added multiple times (i.e., once for each shader
 * target), it is only counted once.
 */
class count_uniform_size : public program_resource_visitor {
public:
   count_uniform_size(struct string_to_uint_map *map)
      : num_active_uniforms(0), num_values(0), num_shader_samplers(0),
        num_shader_images(0), num_shader_uniform_components(0),
        is_ubo_var(false), map(map)
   {
      /* empty */
   }

   void start_shader()
   {
      this->num_shader_samplers = 0;
      this->num_shader_images = 0;
      this->num_shader_uniform_components = 0;
   }

   void process(ir_variable *var)
   {
      this->is_ubo_var = var->is_in_uniform_block();
      if (var->is_interface_instance())
         program_resource_visitor::process(var->get_interface_type(),
                                           var->get_interface_type()->name);
      else
         program_resource_visitor::process(var);
   }

   /**
    * Total number of active uniforms counted
    */
   unsigned num_active_uniforms;

   /**
    * Number of data values required to back the storage for the active
    * uniforms
    */
   unsigned num_values;

   /**
    * Number of samplers used
    */
   unsigned num_shader_samplers;

   /**
    * Number of images used
    */
   unsigned num_shader_images;

   /**
    * Number of uniforms used in the current shader
    */
   unsigned num_shader_uniform_components;

   bool is_ubo_var;

private:
   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      assert(!type->without_array()->is_record());
      assert(!type->without_array()->is_interface());

      (void) row_major;

      /* Count the number of samplers regardless of whether the uniform is
       * already in the hash table.  The hash table prevents adding the same
       * uniform for multiple shader targets, but in this case we want to
       * count it for each shader target.
       */
      const unsigned values = values_for_type(type);
      if (type->contains_sampler()) {
         this->num_shader_samplers += values;
      } else if (type->contains_image()) {
         this->num_shader_images += values;

         /* As drivers are likely to represent image uniforms as
          * scalar indices, count them against the limit of uniform
          * components in the default block.  The spec allows image
          * uniforms to use up no more than one scalar slot.
          */
         this->num_shader_uniform_components += values;
      } else {
         /* Accumulate the total number of uniform slots used by this shader.
          * Note that samplers do not count against this limit because they
          * don't use any storage on current hardware.
          */
         if (!is_ubo_var)
            this->num_shader_uniform_components += values;
      }

      /* If the uniform is already in the map, there's nothing more to do.
       */
      unsigned id;
      if (this->map->get(id, name))
         return;

      this->map->put(this->num_active_uniforms, name);

      /* Each leaf uniform occupies one entry in the list of active
       * uniforms.
       */
      this->num_active_uniforms++;
      this->num_values += values;
   }

   struct string_to_uint_map *map;
};
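
/* A minimal sketch of how the linker drives this visitor (mirroring
 * link_assign_uniform_locations() below):
 *
 *     count_uniform_size uniform_size(prog->UniformHash);
 *     for each linked stage {
 *        uniform_size.start_shader();
 *        for each ir_var_uniform variable "var" in the stage's IR
 *           uniform_size.process(var);
 *        // per-stage totals: num_shader_samplers, num_shader_images,
 *        // num_shader_uniform_components
 *     }
 *     // program-wide totals: num_active_uniforms, num_values
 */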

} /* anonymous namespace */

/**
 * Class to help parcel out pieces of backing storage to uniforms
 *
 * Each uniform processed has some range of the \c gl_constant_value
 * structures associated with it.  The association is done by finding
 * the uniform in the \c string_to_uint_map and using the value from
 * the map to connect that slot in the \c gl_uniform_storage table
 * with the next available slot in the \c gl_constant_value array.
 *
 * \warning
 * This class assumes that every uniform that will be processed is
 * already in the \c string_to_uint_map.  In addition, it assumes that
 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
 * enough."
 */
class parcel_out_uniform_storage : public program_resource_visitor {
public:
   parcel_out_uniform_storage(struct string_to_uint_map *map,
                              struct gl_uniform_storage *uniforms,
                              union gl_constant_value *values)
      : map(map), uniforms(uniforms), values(values)
   {
   }

   void start_shader(gl_shader_stage shader_type)
   {
      assert(shader_type < MESA_SHADER_STAGES);
      this->shader_type = shader_type;

      this->shader_samplers_used = 0;
      this->shader_shadow_samplers = 0;
      this->next_sampler = 0;
      this->next_image = 0;
      memset(this->targets, 0, sizeof(this->targets));
   }

   void set_and_process(struct gl_shader_program *prog,
                        ir_variable *var)
   {
      current_var = var;
      field_counter = 0;

      ubo_block_index = -1;
      if (var->is_in_uniform_block()) {
         if (var->is_interface_instance() && var->type->is_array()) {
            unsigned l = strlen(var->get_interface_type()->name);

            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strncmp(var->get_interface_type()->name,
                           prog->UniformBlocks[i].Name,
                           l) == 0
                   && prog->UniformBlocks[i].Name[l] == '[') {
                  ubo_block_index = i;
                  break;
               }
            }
         } else {
            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strcmp(var->get_interface_type()->name,
                          prog->UniformBlocks[i].Name) == 0) {
                  ubo_block_index = i;
                  break;
               }
            }
         }
         assert(ubo_block_index != -1);

         /* Uniform blocks that were specified with an instance name must be
          * handled a little bit differently.  The name of the variable is
          * the name used to reference the uniform block instead of being
          * the name of a variable within the block.  Therefore, searching
          * for the name within the block will fail.
          */
         if (var->is_interface_instance()) {
            ubo_byte_offset = 0;
         } else {
            const struct gl_uniform_block *const block =
               &prog->UniformBlocks[ubo_block_index];

            assert(var->data.location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[var->data.location];

            ubo_byte_offset = ubo_var->Offset;
         }

         if (var->is_interface_instance())
            process(var->get_interface_type(),
                    var->get_interface_type()->name);
         else
            process(var);
      } else
         process(var);
   }

   int ubo_block_index;
   int ubo_byte_offset;
   gl_shader_stage shader_type;

private:
   void handle_samplers(const glsl_type *base_type,
                        struct gl_uniform_storage *uniform)
   {
      if (base_type->is_sampler()) {
         uniform->sampler[shader_type].index = this->next_sampler;
         uniform->sampler[shader_type].active = true;

         /* Increment the sampler by 1 for non-arrays and by the number of
          * array elements for arrays.
          */
         this->next_sampler +=
            MAX2(1, uniform->array_elements);

         const gl_texture_index target = base_type->sampler_index();
         const unsigned shadow = base_type->sampler_shadow;
         for (unsigned i = uniform->sampler[shader_type].index;
              i < MIN2(this->next_sampler, MAX_SAMPLERS);
              i++) {
            this->targets[i] = target;
            this->shader_samplers_used |= 1U << i;
            this->shader_shadow_samplers |= shadow << i;
         }
      } else {
         uniform->sampler[shader_type].index = ~0;
         uniform->sampler[shader_type].active = false;
      }
   }
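
   /* As an example of the bookkeeping above: if a stage declares
    *
    *     uniform sampler2DShadow sh[2];
    *     uniform sampler2D tex;
    *
    * and they are processed in that order, "sh" is assigned sampler indices
    * 0-1 and "tex" index 2, shader_samplers_used ends up as 0x7 and
    * shader_shadow_samplers as 0x3, and targets[] records the texture target
    * used by each sampler unit.
    */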

   void handle_images(const glsl_type *base_type,
                      struct gl_uniform_storage *uniform)
   {
      if (base_type->is_image()) {
         uniform->image[shader_type].index = this->next_image;
         uniform->image[shader_type].active = true;

         /* Increment the image index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         this->next_image += MAX2(1, uniform->array_elements);

      } else {
         uniform->image[shader_type].index = ~0;
         uniform->image[shader_type].active = false;
      }
   }

   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      (void) type;
      (void) name;
      (void) row_major;
      assert(!"Should not get here.");
   }

   virtual void enter_record(const glsl_type *type, const char *name,
                             bool row_major) {
      assert(type->is_record());
      if (this->ubo_block_index == -1)
         return;
      this->ubo_byte_offset = glsl_align(
         this->ubo_byte_offset, type->std140_base_alignment(row_major));
   }

   virtual void leave_record(const glsl_type *type, const char *name,
                             bool row_major) {
      assert(type->is_record());
      if (this->ubo_block_index == -1)
         return;
      this->ubo_byte_offset = glsl_align(
         this->ubo_byte_offset, type->std140_base_alignment(row_major));
   }
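
   /* Rough illustration of the std140 handling above: a block member
    * declared as
    *
    *     struct { float f; vec3 v; } s;
    *
    * has a std140 base alignment of 16 (the vec3 member rounds up to vec4
    * alignment), so enter_record() aligns ubo_byte_offset to a 16-byte
    * boundary before the fields are laid out and leave_record() re-aligns
    * it afterwards for the next block member.
    */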

   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major, const glsl_type *record_type,
                            bool last_field)
   {
      assert(!type->without_array()->is_record());
      assert(!type->without_array()->is_interface());

      unsigned id;
      bool found = this->map->get(id, name);
      assert(found);

      if (!found)
         return;

      const glsl_type *base_type;
      if (type->is_array()) {
         this->uniforms[id].array_elements = type->length;
         base_type = type->fields.array;
      } else {
         this->uniforms[id].array_elements = 0;
         base_type = type;
      }

      /* This assigns uniform indices to sampler and image uniforms. */
      handle_samplers(base_type, &this->uniforms[id]);
      handle_images(base_type, &this->uniforms[id]);

      /* If there is already storage associated with this uniform, it means
       * that it was set while processing an earlier shader stage.  For
       * example, we may be processing the uniform in the fragment shader,
       * but the uniform was already processed in the vertex shader.
       */
      if (this->uniforms[id].storage != NULL) {
         return;
      }

      /* Assign explicit locations. */
      if (current_var->data.explicit_location) {
         /* Set sequential locations for struct fields. */
         if (record_type != NULL) {
            const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
            this->uniforms[id].remap_location =
               current_var->data.location + field_counter;
            field_counter += entries;
         } else {
            this->uniforms[id].remap_location = current_var->data.location;
         }
      } else {
         /* Initialize to indicate that no location is set */
         this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
      }

      this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
      this->uniforms[id].type = base_type;
      this->uniforms[id].initialized = 0;
      this->uniforms[id].num_driver_storage = 0;
      this->uniforms[id].driver_storage = NULL;
      this->uniforms[id].storage = this->values;
      this->uniforms[id].atomic_buffer_index = -1;
      this->uniforms[id].hidden =
         current_var->data.how_declared == ir_var_hidden;
      if (this->ubo_block_index != -1) {
         this->uniforms[id].block_index = this->ubo_block_index;

         const unsigned alignment = type->std140_base_alignment(row_major);
         this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
         this->uniforms[id].offset = this->ubo_byte_offset;
         this->ubo_byte_offset += type->std140_size(row_major);

         if (type->is_array()) {
            this->uniforms[id].array_stride =
               glsl_align(type->fields.array->std140_size(row_major), 16);
         } else {
            this->uniforms[id].array_stride = 0;
         }

         if (type->without_array()->is_matrix()) {
            const glsl_type *matrix = type->without_array();
            const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4;
            const unsigned items =
               row_major ? matrix->matrix_columns : matrix->vector_elements;

            assert(items <= 4);
            this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
            this->uniforms[id].row_major = row_major;
         } else {
            this->uniforms[id].matrix_stride = 0;
            this->uniforms[id].row_major = false;
         }
      } else {
         this->uniforms[id].block_index = -1;
         this->uniforms[id].offset = -1;
         this->uniforms[id].array_stride = -1;
         this->uniforms[id].matrix_stride = -1;
         this->uniforms[id].row_major = false;
      }

      this->values += values_for_type(type);
   }
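
   /* For instance, a column-major "mat3 m[2]" member of a std140 block gets
    * matrix_stride = 16 (each 3-float column is padded to a vec4 slot),
    * array_stride = 48 (the std140 size of one mat3, already a multiple of
    * 16), and ubo_byte_offset advances by 96 for the two elements.
    */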

   struct string_to_uint_map *map;

   struct gl_uniform_storage *uniforms;
   unsigned next_sampler;
   unsigned next_image;

public:
   union gl_constant_value *values;

   gl_texture_index targets[MAX_SAMPLERS];

   /**
    * Current variable being processed.
    */
   ir_variable *current_var;

   /**
    * Field counter is used to ensure that uniform structures
    * with explicit locations get sequential locations.
    */
   unsigned field_counter;

   /**
    * Mask of samplers used by the current shader stage.
    */
   unsigned shader_samplers_used;

   /**
    * Mask of samplers used by the current shader stage for shadows.
    */
   unsigned shader_shadow_samplers;
};

/**
 * Merges a uniform block into an array of uniform blocks that may or
 * may not already contain a copy of it.
 *
 * Returns the index of the new block in the array.
 */
int
link_cross_validate_uniform_block(void *mem_ctx,
                                  struct gl_uniform_block **linked_blocks,
                                  unsigned int *num_linked_blocks,
                                  struct gl_uniform_block *new_block)
{
   for (unsigned int i = 0; i < *num_linked_blocks; i++) {
      struct gl_uniform_block *old_block = &(*linked_blocks)[i];

      if (strcmp(old_block->Name, new_block->Name) == 0)
         return link_uniform_blocks_are_compatible(old_block, new_block)
            ? i : -1;
   }

   *linked_blocks = reralloc(mem_ctx, *linked_blocks,
                             struct gl_uniform_block,
                             *num_linked_blocks + 1);
   int linked_block_index = (*num_linked_blocks)++;
   struct gl_uniform_block *linked_block = &(*linked_blocks)[linked_block_index];

   memcpy(linked_block, new_block, sizeof(*new_block));
   linked_block->Uniforms = ralloc_array(*linked_blocks,
                                         struct gl_uniform_buffer_variable,
                                         linked_block->NumUniforms);

   memcpy(linked_block->Uniforms,
          new_block->Uniforms,
          sizeof(*linked_block->Uniforms) * linked_block->NumUniforms);

   for (unsigned int i = 0; i < linked_block->NumUniforms; i++) {
      struct gl_uniform_buffer_variable *ubo_var =
         &linked_block->Uniforms[i];

      if (ubo_var->Name == ubo_var->IndexName) {
         ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
         ubo_var->IndexName = ubo_var->Name;
      } else {
         ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
         ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName);
      }
   }

   return linked_block_index;
}

/**
 * Walks the IR and updates the references to uniform blocks in the
 * ir_variables to point at the linked shader's list (previously, they
 * would point at the uniform block list in one of the pre-linked
 * shaders).
 */
static void
link_update_uniform_buffer_variables(struct gl_shader *shader)
{
   foreach_in_list(ir_instruction, node, shader->ir) {
      ir_variable *const var = node->as_variable();

      if ((var == NULL) || !var->is_in_uniform_block())
         continue;

      assert(var->data.mode == ir_var_uniform);

      if (var->is_interface_instance()) {
         var->data.location = 0;
         continue;
      }

      bool found = false;
      char sentinel = '\0';

      if (var->type->is_record()) {
         sentinel = '.';
      } else if (var->type->is_array()
                 && var->type->fields.array->is_record()) {
         sentinel = '[';
      }

      const unsigned l = strlen(var->name);
      for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
         for (unsigned j = 0; j < shader->UniformBlocks[i].NumUniforms; j++) {
            if (sentinel) {
               const char *begin = shader->UniformBlocks[i].Uniforms[j].Name;
               const char *end = strchr(begin, sentinel);

               if (end == NULL)
                  continue;

               if ((ptrdiff_t) l != (end - begin))
                  continue;

               if (strncmp(var->name, begin, l) == 0) {
                  found = true;
                  var->data.location = j;
                  break;
               }
            } else if (!strcmp(var->name,
                               shader->UniformBlocks[i].Uniforms[j].Name)) {
               found = true;
               var->data.location = j;
               break;
            }
         }
         if (found)
            break;
      }
      assert(found);
   }
}
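
/* For example, for a block declared as "uniform Blk { S s; };" where S is a
 * struct, the block's uniform list stores leaf names such as "s.field", so
 * the '.' sentinel above lets the bare variable name "s" match its first
 * entry; '[' plays the same role for arrays of structs.
 */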

/**
 * Scan the program for image uniforms and store image unit access
 * information into the gl_shader data structure.
 */
static void
link_set_image_access_qualifiers(struct gl_shader_program *prog)
{
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      gl_shader *sh = prog->_LinkedShaders[i];

      if (sh == NULL)
         continue;

      foreach_in_list(ir_instruction, node, sh->ir) {
         ir_variable *var = node->as_variable();

         if (var && var->data.mode == ir_var_uniform &&
             var->type->contains_image()) {
            unsigned id = 0;
            bool found = prog->UniformHash->get(id, var->name);
            assert(found);
            (void) found;
            const gl_uniform_storage *storage = &prog->UniformStorage[id];
            const unsigned index = storage->image[i].index;
            const GLenum access = (var->data.image_read_only ? GL_READ_ONLY :
                                   var->data.image_write_only ? GL_WRITE_ONLY :
                                   GL_READ_WRITE);

            for (unsigned j = 0; j < MAX2(1, storage->array_elements); ++j)
               sh->ImageAccess[index + j] = access;
         }
      }
   }
}

/**
 * Sort the array of uniform storage so that the non-hidden uniforms are first
 *
 * This function sorts the list "in place."  This is important because some of
 * the storage accessible from \c uniforms has \c uniforms as its \c ralloc
 * context.  If \c uniforms is freed, some other storage will also be freed.
 */
static unsigned
move_hidden_uniforms_to_end(struct gl_shader_program *prog,
                            struct gl_uniform_storage *uniforms,
                            unsigned num_elements)
{
   struct gl_uniform_storage *sorted_uniforms =
      ralloc_array(prog, struct gl_uniform_storage, num_elements);
   unsigned hidden_uniforms = 0;
   unsigned j = 0;

   /* Add the non-hidden uniforms. */
   for (unsigned i = 0; i < num_elements; i++) {
      if (!uniforms[i].hidden)
         sorted_uniforms[j++] = uniforms[i];
   }

   /* Add and count the hidden uniforms. */
   for (unsigned i = 0; i < num_elements; i++) {
      if (uniforms[i].hidden) {
         sorted_uniforms[j++] = uniforms[i];
         hidden_uniforms++;
      }
   }

   assert(prog->UniformHash != NULL);
   prog->UniformHash->clear();
   for (unsigned i = 0; i < num_elements; i++) {
      if (sorted_uniforms[i].name != NULL)
         prog->UniformHash->put(i, sorted_uniforms[i].name);
   }

   memcpy(uniforms, sorted_uniforms, sizeof(uniforms[0]) * num_elements);
   ralloc_free(sorted_uniforms);

   return hidden_uniforms;
}

void
link_assign_uniform_locations(struct gl_shader_program *prog,
                              unsigned int boolean_true)
{
   ralloc_free(prog->UniformStorage);
   prog->UniformStorage = NULL;
   prog->NumUserUniformStorage = 0;

   if (prog->UniformHash != NULL) {
      prog->UniformHash->clear();
   } else {
      prog->UniformHash = new string_to_uint_map;
   }

   /* First pass: Count the uniform resources used by the user-defined
    * uniforms.  While this happens, each active uniform will have an index
    * assigned to it.
    *
    * Note: this is *NOT* the index that is returned to the application by
    * glGetUniformLocation.
    */
   count_uniform_size uniform_size(prog->UniformHash);
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_shader *sh = prog->_LinkedShaders[i];

      if (sh == NULL)
         continue;

      /* Uniforms that lack an initializer in the shader code have an initial
       * value of zero.  This includes sampler uniforms.
       *
       * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
       *
       *     "The link time initial value is either the value of the
       *     variable's initializer, if present, or 0 if no initializer is
       *     present.  Sampler types cannot have initializers."
       */
      memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
      memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));

      link_update_uniform_buffer_variables(sh);

      /* Reset various per-shader target counts.
       */
      uniform_size.start_shader();

      foreach_in_list(ir_instruction, node, sh->ir) {
         ir_variable *const var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (is_gl_identifier(var->name)) {
            uniform_size.num_shader_uniform_components +=
               var->type->component_slots();
            continue;
         }

         uniform_size.process(var);
      }

      sh->num_samplers = uniform_size.num_shader_samplers;
      sh->NumImages = uniform_size.num_shader_images;
      sh->num_uniform_components = uniform_size.num_shader_uniform_components;

      sh->num_combined_uniform_components = sh->num_uniform_components;
      for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
         sh->num_combined_uniform_components +=
            sh->UniformBlocks[i].UniformBufferSize / 4;
      }
   }

   const unsigned num_user_uniforms = uniform_size.num_active_uniforms;
   const unsigned num_data_slots = uniform_size.num_values;

   /* On the outside chance that there were no uniforms, bail out.
    */
   if (num_user_uniforms == 0)
      return;

   struct gl_uniform_storage *uniforms =
      rzalloc_array(prog, struct gl_uniform_storage, num_user_uniforms);
   union gl_constant_value *data =
      rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
#ifndef NDEBUG
   union gl_constant_value *data_end = &data[num_data_slots];
#endif

   parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      parcel.start_shader((gl_shader_stage)i);

      foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (is_gl_identifier(var->name))
            continue;

         parcel.set_and_process(prog, var);
      }

      prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
      prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;

      STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
                    sizeof(parcel.targets));
      memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
             sizeof(prog->_LinkedShaders[i]->SamplerTargets));
   }

   const unsigned hidden_uniforms =
      move_hidden_uniforms_to_end(prog, uniforms, num_user_uniforms);

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
         /* How many new entries for this uniform? */
         const unsigned entries = MAX2(1, uniforms[i].array_elements);

         /* Set remap table entries to point to the correct
          * gl_uniform_storage.
          */
         for (unsigned j = 0; j < entries; j++) {
            unsigned element_loc = uniforms[i].remap_location + j;
            assert(prog->UniformRemapTable[element_loc] ==
                   INACTIVE_UNIFORM_EXPLICIT_LOCATION);
            prog->UniformRemapTable[element_loc] = &uniforms[i];
         }
      }
   }

   /* Reserve locations for rest of the uniforms. */
   for (unsigned i = 0; i < num_user_uniforms; i++) {

      /* Explicit ones have been set already. */
      if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* how many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniforms[i].array_elements);

      /* resize remap table to fit new entries */
      prog->UniformRemapTable =
         reralloc(prog,
                  prog->UniformRemapTable,
                  gl_uniform_storage *,
                  prog->NumUniformRemapTable + entries);

      /* set pointers for this uniform */
      for (unsigned j = 0; j < entries; j++)
         prog->UniformRemapTable[prog->NumUniformRemapTable + j] = &uniforms[i];

      /* set the base location in remap table for the uniform */
      uniforms[i].remap_location = prog->NumUniformRemapTable;

      prog->NumUniformRemapTable += entries;
   }
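
   /* At this point every active user uniform has a base location in
    * prog->UniformRemapTable (the table backing the GL uniform location
    * space): an array uniform with array_elements == 3 occupies three
    * consecutive remap slots, all pointing at the same gl_uniform_storage
    * entry, with remap_location recording the first of them.
    */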

#ifndef NDEBUG
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      assert(uniforms[i].storage != NULL);
   }

   assert(parcel.values == data_end);
#endif

   prog->NumUserUniformStorage = num_user_uniforms;
   prog->NumHiddenUniforms = hidden_uniforms;
   prog->UniformStorage = uniforms;

   link_set_image_access_qualifiers(prog);
   link_set_uniform_initializers(prog, boolean_true);

   return;
}