glsl: Use typed foreach_in_list instead of foreach_list.
[mesa.git] / src / glsl / link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program/hash_table.h"
30 #include "program.h"
31
32 /**
33 * \file link_uniforms.cpp
34 * Assign locations for GLSL uniforms.
35 *
36 * \author Ian Romanick <ian.d.romanick@intel.com>
37 */
38
39 /**
40 * Used by linker to indicate uniforms that have no location set.
41 */
42 #define UNMAPPED_UNIFORM_LOC ~0u
43
44 /**
45 * Count the backing storage requirements for a type
46 */
47 static unsigned
48 values_for_type(const glsl_type *type)
49 {
50 if (type->is_sampler()) {
51 return 1;
52 } else if (type->is_array() && type->fields.array->is_sampler()) {
53 return type->array_size();
54 } else {
55 return type->component_slots();
56 }
57 }
58
59 void
60 program_resource_visitor::process(const glsl_type *type, const char *name)
61 {
62 assert(type->is_record()
63 || (type->is_array() && type->fields.array->is_record())
64 || type->is_interface()
65 || (type->is_array() && type->fields.array->is_interface()));
66
67 char *name_copy = ralloc_strdup(NULL, name);
68 recursion(type, &name_copy, strlen(name), false, NULL);
69 ralloc_free(name_copy);
70 }
71
72 void
73 program_resource_visitor::process(ir_variable *var)
74 {
75 const glsl_type *t = var->type;
76
77 /* false is always passed for the row_major parameter to the other
78 * processing functions because no information is available to do
79 * otherwise. See the warning in linker.h.
80 */
81
82 /* Only strdup the name if we actually will need to modify it. */
83 if (var->data.from_named_ifc_block_array) {
84 /* lower_named_interface_blocks created this variable by lowering an
85 * interface block array to an array variable. For example if the
86 * original source code was:
87 *
88 * out Blk { vec4 bar } foo[3];
89 *
90 * Then the variable is now:
91 *
92 * out vec4 bar[3];
93 *
94 * We need to visit each array element using the names constructed like
95 * so:
96 *
97 * Blk[0].bar
98 * Blk[1].bar
99 * Blk[2].bar
100 */
101 assert(t->is_array());
102 const glsl_type *ifc_type = var->get_interface_type();
103 char *name = ralloc_strdup(NULL, ifc_type->name);
104 size_t name_length = strlen(name);
105 for (unsigned i = 0; i < t->length; i++) {
106 size_t new_length = name_length;
107 ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i,
108 var->name);
109 /* Note: row_major is only meaningful for uniform blocks, and
110 * lowering is only applied to non-uniform interface blocks, so we
111 * can safely pass false for row_major.
112 */
113 recursion(var->type, &name, new_length, false, NULL);
114 }
115 ralloc_free(name);
116 } else if (var->data.from_named_ifc_block_nonarray) {
117 /* lower_named_interface_blocks created this variable by lowering a
118 * named interface block (non-array) to an ordinary variable. For
119 * example if the original source code was:
120 *
121 * out Blk { vec4 bar } foo;
122 *
123 * Then the variable is now:
124 *
125 * out vec4 bar;
126 *
127 * We need to visit this variable using the name:
128 *
129 * Blk.bar
130 */
131 const glsl_type *ifc_type = var->get_interface_type();
132 char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name);
133 /* Note: row_major is only meaningful for uniform blocks, and lowering
134 * is only applied to non-uniform interface blocks, so we can safely
135 * pass false for row_major.
136 */
137 recursion(var->type, &name, strlen(name), false, NULL);
138 ralloc_free(name);
139 } else if (t->is_record() || (t->is_array() && t->fields.array->is_record())) {
140 char *name = ralloc_strdup(NULL, var->name);
141 recursion(var->type, &name, strlen(name), false, NULL);
142 ralloc_free(name);
143 } else if (t->is_interface()) {
144 char *name = ralloc_strdup(NULL, var->type->name);
145 recursion(var->type, &name, strlen(name), false, NULL);
146 ralloc_free(name);
147 } else if (t->is_array() && t->fields.array->is_interface()) {
148 char *name = ralloc_strdup(NULL, var->type->fields.array->name);
149 recursion(var->type, &name, strlen(name), false, NULL);
150 ralloc_free(name);
151 } else {
152 this->visit_field(t, var->name, false, NULL);
153 }
154 }
155
/**
 * Recursively walk \c t, visiting each leaf field.
 *
 * \param t            Type currently being processed.
 * \param name         In/out ralloc'd name buffer; the tail past
 *                     \c name_length is rewritten as ".field" / "[i]"
 *                     suffixes are appended for each level.
 * \param name_length  Length of the valid prefix in \c name.
 * \param row_major    Matrix layout inherited from the enclosing field.
 * \param record_type  Outermost record type, passed only to the first leaf
 *                     field of that record (used for explicit-location and
 *                     std140-alignment bookkeeping); NULL otherwise.
 */
void
program_resource_visitor::recursion(const glsl_type *t, char **name,
                                    size_t name_length, bool row_major,
                                    const glsl_type *record_type)
{
   /* Records need to have each field processed individually.
    *
    * Arrays of records need to have each array element processed
    * individually, then each field of the resulting array elements processed
    * individually.
    */
   if (t->is_record() || t->is_interface()) {
      /* Remember the outermost record so its first leaf can be flagged. */
      if (record_type == NULL && t->is_record())
         record_type = t;

      for (unsigned i = 0; i < t->length; i++) {
         const char *field = t->fields.structure[i].name;
         size_t new_length = name_length;

         /* Give subclasses a peek at nested record fields themselves. */
         if (t->fields.structure[i].type->is_record())
            this->visit_field(&t->fields.structure[i]);

         /* Append '.field' to the current variable name. */
         if (name_length == 0) {
            /* Empty prefix (e.g. unnamed interface block): no leading dot. */
            ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
         } else {
            ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
         }

         recursion(t->fields.structure[i].type, name, new_length,
                   t->fields.structure[i].row_major, record_type);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else if (t->is_array() && (t->fields.array->is_record()
              || t->fields.array->is_interface())) {
      if (record_type == NULL && t->fields.array->is_record())
         record_type = t->fields.array;

      for (unsigned i = 0; i < t->length; i++) {
         size_t new_length = name_length;

         /* Append the subscript to the current variable name */
         ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);

         recursion(t->fields.array, name, new_length, row_major,
                   record_type);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else {
      /* Leaf: scalar, vector, matrix, or array of those. */
      this->visit_field(t, *name, row_major, record_type);
   }
}
216
217 void
218 program_resource_visitor::visit_field(const glsl_type *type, const char *name,
219 bool row_major,
220 const glsl_type *)
221 {
222 visit_field(type, name, row_major);
223 }
224
225 void
226 program_resource_visitor::visit_field(const glsl_struct_field *field)
227 {
228 (void) field;
229 /* empty */
230 }
231
232 namespace {
233
234 /**
235 * Class to help calculate the storage requirements for a set of uniforms
236 *
237 * As uniforms are added to the active set the number of active uniforms and
238 * the storage requirements for those uniforms are accumulated. The active
 * uniforms are added to the hash table supplied to the constructor.
240 *
241 * If the same uniform is added multiple times (i.e., once for each shader
242 * target), it will only be accounted once.
243 */
class count_uniform_size : public program_resource_visitor {
public:
   count_uniform_size(struct string_to_uint_map *map)
      : num_active_uniforms(0), num_values(0), num_shader_samplers(0),
        num_shader_images(0), num_shader_uniform_components(0),
        is_ubo_var(false), map(map)
   {
      /* empty */
   }

   /* Reset the per-shader-stage counters; program-wide totals
    * (num_active_uniforms, num_values) are preserved across stages.
    */
   void start_shader()
   {
      this->num_shader_samplers = 0;
      this->num_shader_images = 0;
      this->num_shader_uniform_components = 0;
   }

   /* Count one uniform variable.  Interface instances are walked via their
    * interface type so that member names (not the instance name) are used.
    */
   void process(ir_variable *var)
   {
      this->is_ubo_var = var->is_in_uniform_block();
      if (var->is_interface_instance())
         program_resource_visitor::process(var->get_interface_type(),
                                           var->get_interface_type()->name);
      else
         program_resource_visitor::process(var);
   }

   /**
    * Total number of active uniforms counted
    */
   unsigned num_active_uniforms;

   /**
    * Number of data values required to back the storage for the active uniforms
    */
   unsigned num_values;

   /**
    * Number of samplers used
    */
   unsigned num_shader_samplers;

   /**
    * Number of images used
    */
   unsigned num_shader_images;

   /**
    * Number of uniforms used in the current shader
    */
   unsigned num_shader_uniform_components;

   /* True while the variable currently being processed lives in a uniform
    * block (such uniforms don't consume default-block components).
    */
   bool is_ubo_var;

private:
   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      /* recursion() must have flattened all aggregates before we get here. */
      assert(!type->is_record());
      assert(!(type->is_array() && type->fields.array->is_record()));
      assert(!type->is_interface());
      assert(!(type->is_array() && type->fields.array->is_interface()));

      (void) row_major;

      /* Count the number of samplers regardless of whether the uniform is
       * already in the hash table.  The hash table prevents adding the same
       * uniform for multiple shader targets, but in this case we want to
       * count it for each shader target.
       */
      const unsigned values = values_for_type(type);
      if (type->contains_sampler()) {
         this->num_shader_samplers += values;
      } else if (type->contains_image()) {
         this->num_shader_images += values;

         /* As drivers are likely to represent image uniforms as
          * scalar indices, count them against the limit of uniform
          * components in the default block.  The spec allows image
          * uniforms to use up no more than one scalar slot.
          */
         this->num_shader_uniform_components += values;
      } else {
         /* Accumulate the total number of uniform slots used by this shader.
          * Note that samplers do not count against this limit because they
          * don't use any storage on current hardware.
          */
         if (!is_ubo_var)
            this->num_shader_uniform_components += values;
      }

      /* If the uniform is already in the map, there's nothing more to do.
       */
      unsigned id;
      if (this->map->get(id, name))
         return;

      /* The map value doubles as the uniform's index in the storage array. */
      this->map->put(this->num_active_uniforms, name);

      /* Each leaf uniform occupies one entry in the list of active
       * uniforms.
       */
      this->num_active_uniforms++;
      this->num_values += values;
   }

   /* Maps uniform name -> index; shared across all shader stages so each
    * uniform is accounted only once program-wide.
    */
   struct string_to_uint_map *map;
};
352
353 } /* anonymous namespace */
354
355 /**
356 * Class to help parcel out pieces of backing storage to uniforms
357 *
358 * Each uniform processed has some range of the \c gl_constant_value
359 * structures associated with it. The association is done by finding
360 * the uniform in the \c string_to_uint_map and using the value from
361 * the map to connect that slot in the \c gl_uniform_storage table
362 * with the next available slot in the \c gl_constant_value array.
363 *
364 * \warning
365 * This class assumes that every uniform that will be processed is
366 * already in the \c string_to_uint_map. In addition, it assumes that
367 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
368 * enough."
369 */
class parcel_out_uniform_storage : public program_resource_visitor {
public:
   parcel_out_uniform_storage(struct string_to_uint_map *map,
                              struct gl_uniform_storage *uniforms,
                              union gl_constant_value *values)
      : map(map), uniforms(uniforms), values(values)
   {
   }

   /* Reset per-stage sampler/image allocation state before walking a stage. */
   void start_shader(gl_shader_stage shader_type)
   {
      assert(shader_type < MESA_SHADER_STAGES);
      this->shader_type = shader_type;

      this->shader_samplers_used = 0;
      this->shader_shadow_samplers = 0;
      this->next_sampler = 0;
      this->next_image = 0;
      memset(this->targets, 0, sizeof(this->targets));
   }

   /* Record per-variable state (explicit-location counter, owning UBO and
    * its layout) and then visit the variable's leaf fields.
    */
   void set_and_process(struct gl_shader_program *prog,
                        ir_variable *var)
   {
      current_var = var;
      field_counter = 0;

      ubo_block_index = -1;
      if (var->is_in_uniform_block()) {
         if (var->is_interface_instance() && var->type->is_array()) {
            /* Instance arrays were split into one block per element named
             * "Block[i]"; match on the prefix up to the '['.
             */
            unsigned l = strlen(var->get_interface_type()->name);

            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strncmp(var->get_interface_type()->name,
                           prog->UniformBlocks[i].Name,
                           l) == 0
                   && prog->UniformBlocks[i].Name[l] == '[') {
                  ubo_block_index = i;
                  break;
               }
            }
         } else {
            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strcmp(var->get_interface_type()->name,
                          prog->UniformBlocks[i].Name) == 0) {
                  ubo_block_index = i;
                  break;
               }
            }
         }
         assert(ubo_block_index != -1);

         /* Uniform blocks that were specified with an instance name must be
          * handled a little bit differently.  The name of the variable is the
          * name used to reference the uniform block instead of being the name
          * of a variable within the block.  Therefore, searching for the name
          * within the block will fail.
          */
         if (var->is_interface_instance()) {
            ubo_byte_offset = 0;
            ubo_row_major = false;
         } else {
            const struct gl_uniform_block *const block =
               &prog->UniformBlocks[ubo_block_index];

            /* data.location indexes the member within the block; it was set
             * by link_update_uniform_buffer_variables().
             */
            assert(var->data.location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[var->data.location];

            ubo_row_major = ubo_var->RowMajor;
            ubo_byte_offset = ubo_var->Offset;
         }

         if (var->is_interface_instance())
            process(var->get_interface_type(),
                    var->get_interface_type()->name);
         else
            process(var);
      } else
         process(var);
   }

   /* Block index of the UBO owning the variable being processed, or -1. */
   int ubo_block_index;
   /* Running std140 byte offset within that block. */
   int ubo_byte_offset;
   bool ubo_row_major;
   gl_shader_stage shader_type;

private:
   /* Assign sampler indices for \c uniform if \c base_type is a sampler,
    * and accumulate the stage's used/shadow sampler masks.
    */
   void handle_samplers(const glsl_type *base_type,
                        struct gl_uniform_storage *uniform)
   {
      if (base_type->is_sampler()) {
         uniform->sampler[shader_type].index = this->next_sampler;
         uniform->sampler[shader_type].active = true;

         /* Increment the sampler by 1 for non-arrays and by the number of
          * array elements for arrays.
          */
         this->next_sampler +=
            MAX2(1, uniform->array_elements);

         const gl_texture_index target = base_type->sampler_index();
         const unsigned shadow = base_type->sampler_shadow;
         /* Clamp against MAX_SAMPLERS so an over-limit shader doesn't
          * overrun the targets[] array here (limits are checked elsewhere).
          */
         for (unsigned i = uniform->sampler[shader_type].index;
              i < MIN2(this->next_sampler, MAX_SAMPLERS);
              i++) {
            this->targets[i] = target;
            this->shader_samplers_used |= 1U << i;
            this->shader_shadow_samplers |= shadow << i;
         }
      } else {
         uniform->sampler[shader_type].index = ~0;
         uniform->sampler[shader_type].active = false;
      }
   }

   /* Assign image unit indices for \c uniform if \c base_type is an image. */
   void handle_images(const glsl_type *base_type,
                      struct gl_uniform_storage *uniform)
   {
      if (base_type->is_image()) {
         uniform->image[shader_type].index = this->next_image;
         uniform->image[shader_type].active = true;

         /* Increment the image index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         this->next_image += MAX2(1, uniform->array_elements);

      } else {
         uniform->image[shader_type].index = ~0;
         uniform->image[shader_type].active = false;
      }
   }

   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      /* This visitor is only ever driven through the four-argument
       * visit_field() below; the three-argument form must not be reached.
       */
      (void) type;
      (void) name;
      (void) row_major;
      assert(!"Should not get here.");
   }

   /* Fill in the gl_uniform_storage entry for one leaf uniform and carve
    * out its slice of the gl_constant_value backing store.
    */
   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major, const glsl_type *record_type)
   {
      assert(!type->is_record());
      assert(!(type->is_array() && type->fields.array->is_record()));
      assert(!type->is_interface());
      assert(!(type->is_array() && type->fields.array->is_interface()));

      (void) row_major;

      /* count_uniform_size must already have entered this name in the map. */
      unsigned id;
      bool found = this->map->get(id, name);
      assert(found);

      if (!found)
         return;

      const glsl_type *base_type;
      if (type->is_array()) {
         this->uniforms[id].array_elements = type->length;
         base_type = type->fields.array;
      } else {
         this->uniforms[id].array_elements = 0;
         base_type = type;
      }

      /* This assigns uniform indices to sampler and image uniforms. */
      handle_samplers(base_type, &this->uniforms[id]);
      handle_images(base_type, &this->uniforms[id]);

      /* If there is already storage associated with this uniform, it means
       * that it was set while processing an earlier shader stage.  For
       * example, we may be processing the uniform in the fragment shader, but
       * the uniform was already processed in the vertex shader.
       */
      if (this->uniforms[id].storage != NULL) {
         return;
      }

      /* Assign explicit locations. */
      if (current_var->data.explicit_location) {
         /* Set sequential locations for struct fields. */
         if (record_type != NULL) {
            const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
            this->uniforms[id].remap_location =
               current_var->data.location + field_counter;
            field_counter += entries;
         } else {
            this->uniforms[id].remap_location = current_var->data.location;
         }
      } else {
         /* Initialize to indicate that no location is set */
         this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
      }

      this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
      this->uniforms[id].type = base_type;
      this->uniforms[id].initialized = 0;
      this->uniforms[id].num_driver_storage = 0;
      this->uniforms[id].driver_storage = NULL;
      this->uniforms[id].storage = this->values;
      this->uniforms[id].atomic_buffer_index = -1;
      if (this->ubo_block_index != -1) {
         this->uniforms[id].block_index = this->ubo_block_index;

         /* std140 alignment: when this is the first leaf of a record, align
          * to the record's base alignment, not just the leaf's.
          */
         const unsigned alignment = record_type
            ? record_type->std140_base_alignment(ubo_row_major)
            : type->std140_base_alignment(ubo_row_major);
         this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
         this->uniforms[id].offset = this->ubo_byte_offset;
         this->ubo_byte_offset += type->std140_size(ubo_row_major);

         if (type->is_array()) {
            /* std140 rounds array strides up to a vec4 (16 bytes). */
            this->uniforms[id].array_stride =
               glsl_align(type->fields.array->std140_size(ubo_row_major), 16);
         } else {
            this->uniforms[id].array_stride = 0;
         }

         if (type->is_matrix() ||
             (type->is_array() && type->fields.array->is_matrix())) {
            this->uniforms[id].matrix_stride = 16;
            this->uniforms[id].row_major = ubo_row_major;
         } else {
            this->uniforms[id].matrix_stride = 0;
            this->uniforms[id].row_major = false;
         }
      } else {
         this->uniforms[id].block_index = -1;
         this->uniforms[id].offset = -1;
         this->uniforms[id].array_stride = -1;
         this->uniforms[id].matrix_stride = -1;
         this->uniforms[id].row_major = false;
      }

      /* Advance the backing-store cursor past this uniform's slice. */
      this->values += values_for_type(type);
   }

   struct string_to_uint_map *map;

   struct gl_uniform_storage *uniforms;
   unsigned next_sampler;
   unsigned next_image;

public:
   /* Cursor into the gl_constant_value backing store; advanced per leaf. */
   union gl_constant_value *values;

   gl_texture_index targets[MAX_SAMPLERS];

   /**
    * Current variable being processed.
    */
   ir_variable *current_var;

   /**
    * Field counter is used to take care that uniform structures
    * with explicit locations get sequential locations.
    */
   unsigned field_counter;

   /**
    * Mask of samplers used by the current shader stage.
    */
   unsigned shader_samplers_used;

   /**
    * Mask of samplers used by the current shader stage for shadows.
    */
   unsigned shader_shadow_samplers;
};
644
645 /**
646 * Merges a uniform block into an array of uniform blocks that may or
647 * may not already contain a copy of it.
648 *
649 * Returns the index of the new block in the array.
650 */
651 int
652 link_cross_validate_uniform_block(void *mem_ctx,
653 struct gl_uniform_block **linked_blocks,
654 unsigned int *num_linked_blocks,
655 struct gl_uniform_block *new_block)
656 {
657 for (unsigned int i = 0; i < *num_linked_blocks; i++) {
658 struct gl_uniform_block *old_block = &(*linked_blocks)[i];
659
660 if (strcmp(old_block->Name, new_block->Name) == 0)
661 return link_uniform_blocks_are_compatible(old_block, new_block)
662 ? i : -1;
663 }
664
665 *linked_blocks = reralloc(mem_ctx, *linked_blocks,
666 struct gl_uniform_block,
667 *num_linked_blocks + 1);
668 int linked_block_index = (*num_linked_blocks)++;
669 struct gl_uniform_block *linked_block = &(*linked_blocks)[linked_block_index];
670
671 memcpy(linked_block, new_block, sizeof(*new_block));
672 linked_block->Uniforms = ralloc_array(*linked_blocks,
673 struct gl_uniform_buffer_variable,
674 linked_block->NumUniforms);
675
676 memcpy(linked_block->Uniforms,
677 new_block->Uniforms,
678 sizeof(*linked_block->Uniforms) * linked_block->NumUniforms);
679
680 for (unsigned int i = 0; i < linked_block->NumUniforms; i++) {
681 struct gl_uniform_buffer_variable *ubo_var =
682 &linked_block->Uniforms[i];
683
684 if (ubo_var->Name == ubo_var->IndexName) {
685 ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
686 ubo_var->IndexName = ubo_var->Name;
687 } else {
688 ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
689 ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName);
690 }
691 }
692
693 return linked_block_index;
694 }
695
696 /**
697 * Walks the IR and update the references to uniform blocks in the
698 * ir_variables to point at linked shader's list (previously, they
699 * would point at the uniform block list in one of the pre-linked
700 * shaders).
701 */
static void
link_update_uniform_buffer_variables(struct gl_shader *shader)
{
   foreach_in_list(ir_instruction, node, shader->ir) {
      ir_variable *const var = node->as_variable();

      /* Only uniforms that live inside a uniform block are relevant here. */
      if ((var == NULL) || !var->is_in_uniform_block())
         continue;

      assert(var->data.mode == ir_var_uniform);

      /* For an interface instance the block members are found by name via
       * the interface type, so the instance itself just gets location 0.
       */
      if (var->is_interface_instance()) {
         var->data.location = 0;
         continue;
      }

      bool found = false;
      /* Block-member names for records / arrays of records are stored as
       * "var.field" / "var[i]...", so only the prefix up to the sentinel
       * character can be compared against the variable's name.
       */
      char sentinel = '\0';

      if (var->type->is_record()) {
         sentinel = '.';
      } else if (var->type->is_array()
                 && var->type->fields.array->is_record()) {
         sentinel = '[';
      }

      const unsigned l = strlen(var->name);
      for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
         for (unsigned j = 0; j < shader->UniformBlocks[i].NumUniforms; j++) {
            if (sentinel) {
               const char *begin = shader->UniformBlocks[i].Uniforms[j].Name;
               const char *end = strchr(begin, sentinel);

               if (end == NULL)
                  continue;

               /* Prefix lengths must match exactly to avoid matching a
                * longer name that merely starts with var->name.
                */
               if (l != (end - begin))
                  continue;

               if (strncmp(var->name, begin, l) == 0) {
                  found = true;
                  /* location = index of the member within its block. */
                  var->data.location = j;
                  break;
               }
            } else if (!strcmp(var->name,
                               shader->UniformBlocks[i].Uniforms[j].Name)) {
               found = true;
               var->data.location = j;
               break;
            }
         }
         if (found)
            break;
      }
      /* Every block-resident uniform must exist in some linked block. */
      assert(found);
   }
}
759
760 void
761 link_assign_uniform_block_offsets(struct gl_shader *shader)
762 {
763 for (unsigned b = 0; b < shader->NumUniformBlocks; b++) {
764 struct gl_uniform_block *block = &shader->UniformBlocks[b];
765
766 unsigned offset = 0;
767 for (unsigned int i = 0; i < block->NumUniforms; i++) {
768 struct gl_uniform_buffer_variable *ubo_var = &block->Uniforms[i];
769 const struct glsl_type *type = ubo_var->Type;
770
771 unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor);
772 unsigned size = type->std140_size(ubo_var->RowMajor);
773
774 offset = glsl_align(offset, alignment);
775 ubo_var->Offset = offset;
776 offset += size;
777 }
778
779 /* From the GL_ARB_uniform_buffer_object spec:
780 *
781 * "For uniform blocks laid out according to [std140] rules,
782 * the minimum buffer object size returned by the
783 * UNIFORM_BLOCK_DATA_SIZE query is derived by taking the
784 * offset of the last basic machine unit consumed by the
785 * last uniform of the uniform block (including any
786 * end-of-array or end-of-structure padding), adding one,
787 * and rounding up to the next multiple of the base
788 * alignment required for a vec4."
789 */
790 block->UniformBufferSize = glsl_align(offset, 16);
791 }
792 }
793
794 /**
795 * Scan the program for image uniforms and store image unit access
796 * information into the gl_shader data structure.
797 */
798 static void
799 link_set_image_access_qualifiers(struct gl_shader_program *prog)
800 {
801 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
802 gl_shader *sh = prog->_LinkedShaders[i];
803
804 if (sh == NULL)
805 continue;
806
807 foreach_in_list(ir_instruction, node, sh->ir) {
808 ir_variable *var = node->as_variable();
809
810 if (var && var->data.mode == ir_var_uniform &&
811 var->type->contains_image()) {
812 unsigned id = 0;
813 bool found = prog->UniformHash->get(id, var->name);
814 assert(found);
815 (void) found;
816 const gl_uniform_storage *storage = &prog->UniformStorage[id];
817 const unsigned index = storage->image[i].index;
818 const GLenum access = (var->data.image.read_only ? GL_READ_ONLY :
819 var->data.image.write_only ? GL_WRITE_ONLY :
820 GL_READ_WRITE);
821
822 for (unsigned j = 0; j < MAX2(1, storage->array_elements); ++j)
823 sh->ImageAccess[index + j] = access;
824 }
825 }
826 }
827 }
828
/**
 * Assign indices, backing storage, and remap-table locations for all of the
 * user-defined uniforms in \c prog.
 *
 * Two passes over every linked stage: first count_uniform_size assigns each
 * active uniform an index and totals the storage needed, then
 * parcel_out_uniform_storage fills in the gl_uniform_storage entries and
 * carves out the gl_constant_value backing array.  Finally the uniform remap
 * table is populated (explicit locations first, the rest appended).
 */
void
link_assign_uniform_locations(struct gl_shader_program *prog)
{
   /* Discard any state left over from a previous link of this program. */
   ralloc_free(prog->UniformStorage);
   prog->UniformStorage = NULL;
   prog->NumUserUniformStorage = 0;

   if (prog->UniformHash != NULL) {
      prog->UniformHash->clear();
   } else {
      prog->UniformHash = new string_to_uint_map;
   }

   /* First pass: Count the uniform resources used by the user-defined
    * uniforms.  While this happens, each active uniform will have an index
    * assigned to it.
    *
    * Note: this is *NOT* the index that is returned to the application by
    * glGetUniformLocation.
    */
   count_uniform_size uniform_size(prog->UniformHash);
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_shader *sh = prog->_LinkedShaders[i];

      if (sh == NULL)
         continue;

      /* Uniforms that lack an initializer in the shader code have an initial
       * value of zero.  This includes sampler uniforms.
       *
       * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
       *
       *     "The link time initial value is either the value of the variable's
       *     initializer, if present, or 0 if no initializer is present.  Sampler
       *     types cannot have initializers."
       */
      memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
      memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));

      /* Point block-resident uniforms at this program's linked block list. */
      link_update_uniform_buffer_variables(sh);

      /* Reset various per-shader target counts.
       */
      uniform_size.start_shader();

      foreach_in_list(ir_instruction, node, sh->ir) {
         ir_variable *const var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (is_gl_identifier(var->name)) {
            uniform_size.num_shader_uniform_components +=
               var->type->component_slots();
            continue;
         }

         uniform_size.process(var);
      }

      sh->num_samplers = uniform_size.num_shader_samplers;
      sh->NumImages = uniform_size.num_shader_images;
      sh->num_uniform_components = uniform_size.num_shader_uniform_components;

      /* Combined count also includes uniform-block storage, in vec4-sized
       * units (UniformBufferSize is in bytes; /4 gives components).
       */
      sh->num_combined_uniform_components = sh->num_uniform_components;
      for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
         sh->num_combined_uniform_components +=
            sh->UniformBlocks[i].UniformBufferSize / 4;
      }
   }

   const unsigned num_user_uniforms = uniform_size.num_active_uniforms;
   const unsigned num_data_slots = uniform_size.num_values;

   /* On the outside chance that there were no uniforms, bail out.
    */
   if (num_user_uniforms == 0)
      return;

   struct gl_uniform_storage *uniforms =
      rzalloc_array(prog, struct gl_uniform_storage, num_user_uniforms);
   union gl_constant_value *data =
      rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
#ifndef NDEBUG
   union gl_constant_value *data_end = &data[num_data_slots];
#endif

   /* Second pass: fill in storage entries and hand out data slots. */
   parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      parcel.start_shader((gl_shader_stage)i);

      foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (is_gl_identifier(var->name))
            continue;

         parcel.set_and_process(prog, var);
      }

      /* Copy the per-stage sampler bookkeeping back onto the shader. */
      prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
      prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;

      STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) == sizeof(parcel.targets));
      memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
             sizeof(prog->_LinkedShaders[i]->SamplerTargets));
   }

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
         /* How many new entries for this uniform? */
         const unsigned entries = MAX2(1, uniforms[i].array_elements);

         /* Set remap table entries point to correct gl_uniform_storage. */
         for (unsigned j = 0; j < entries; j++) {
            unsigned element_loc = uniforms[i].remap_location + j;
            assert(prog->UniformRemapTable[element_loc] ==
                   INACTIVE_UNIFORM_EXPLICIT_LOCATION);
            prog->UniformRemapTable[element_loc] = &uniforms[i];
         }
      }
   }

   /* Reserve locations for rest of the uniforms. */
   for (unsigned i = 0; i < num_user_uniforms; i++) {

      /* Explicit ones have been set already. */
      if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* how many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniforms[i].array_elements);

      /* resize remap table to fit new entries */
      prog->UniformRemapTable =
         reralloc(prog,
                  prog->UniformRemapTable,
                  gl_uniform_storage *,
                  prog->NumUniformRemapTable + entries);

      /* set pointers for this uniform */
      for (unsigned j = 0; j < entries; j++)
         prog->UniformRemapTable[prog->NumUniformRemapTable+j] = &uniforms[i];

      /* set the base location in remap table for the uniform */
      uniforms[i].remap_location = prog->NumUniformRemapTable;

      prog->NumUniformRemapTable += entries;
   }

#ifndef NDEBUG
   /* Sanity: every uniform got storage, and exactly all slots were used. */
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      assert(uniforms[i].storage != NULL);
   }

   assert(parcel.values == data_end);
#endif

   prog->NumUserUniformStorage = num_user_uniforms;
   prog->UniformStorage = uniforms;

   link_set_image_access_qualifiers(prog);
   link_set_uniform_initializers(prog);

   return;
}