glsl: Use alignment of container record for its first field
[mesa.git] / src / glsl / link_uniforms.cpp
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "main/core.h"
#include "ir.h"
#include "linker.h"
#include "ir_uniform.h"
#include "glsl_symbol_table.h"
#include "program/hash_table.h"
#include "program.h"

/**
 * \file link_uniforms.cpp
 * Assign locations for GLSL uniforms.
 *
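 * Uniform processing happens in two passes over the linked IR: a
 * count_uniform_size visitor first determines how many gl_uniform_storage
 * records and gl_constant_value slots are needed, then a
 * parcel_out_uniform_storage visitor associates each active uniform with its
 * slice of that backing storage.
 *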
 * \author Ian Romanick <ian.d.romanick@intel.com>
 */

/**
 * Count the backing storage requirements for a type
 */
static unsigned
values_for_type(const glsl_type *type)
{
   if (type->is_sampler()) {
      return 1;
   } else if (type->is_array() && type->fields.array->is_sampler()) {
      return type->array_size();
   } else {
      return type->component_slots();
   }
}

void
program_resource_visitor::process(const glsl_type *type, const char *name)
{
   assert(type->is_record()
          || (type->is_array() && type->fields.array->is_record())
          || type->is_interface()
          || (type->is_array() && type->fields.array->is_interface()));

   char *name_copy = ralloc_strdup(NULL, name);
   recursion(type, &name_copy, strlen(name), false, NULL);
   ralloc_free(name_copy);
}

void
program_resource_visitor::process(ir_variable *var)
{
   const glsl_type *t = var->type;

   /* false is always passed for the row_major parameter to the other
    * processing functions because no information is available to do
    * otherwise.  See the warning in linker.h.
    */

   /* Only strdup the name if we actually will need to modify it. */
   if (t->is_record() || (t->is_array() && t->fields.array->is_record())) {
      char *name = ralloc_strdup(NULL, var->name);
      recursion(var->type, &name, strlen(name), false, NULL);
      ralloc_free(name);
   } else if (t->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->name);
      recursion(var->type, &name, strlen(name), false, NULL);
      ralloc_free(name);
   } else if (t->is_array() && t->fields.array->is_interface()) {
      char *name = ralloc_strdup(NULL, var->type->fields.array->name);
      recursion(var->type, &name, strlen(name), false, NULL);
      ralloc_free(name);
   } else {
      this->visit_field(t, var->name, false, NULL);
   }
}

void
program_resource_visitor::recursion(const glsl_type *t, char **name,
                                    size_t name_length, bool row_major,
                                    const glsl_type *record_type)
{
   /* Records need to have each field processed individually.
    *
    * Arrays of records need to have each array element processed
    * individually, then each field of the resulting array elements processed
    * individually.
    */
   if (t->is_record() || t->is_interface()) {
      if (record_type == NULL && t->is_record())
         record_type = t;

      for (unsigned i = 0; i < t->length; i++) {
         const char *field = t->fields.structure[i].name;
         size_t new_length = name_length;

         if (t->fields.structure[i].type->is_record())
            this->visit_field(&t->fields.structure[i]);

         /* Append '.field' to the current variable name. */
         if (name_length == 0) {
            ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
         } else {
            ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
         }

         recursion(t->fields.structure[i].type, name, new_length,
                   t->fields.structure[i].row_major, record_type);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else if (t->is_array() && (t->fields.array->is_record()
                                || t->fields.array->is_interface())) {
      if (record_type == NULL && t->fields.array->is_record())
         record_type = t->fields.array;

      for (unsigned i = 0; i < t->length; i++) {
         size_t new_length = name_length;

         /* Append the subscript to the current variable name */
         ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);

         /* Pass the row_major flag of the enclosing declaration through to
          * the array elements; an array type has no per-element
          * glsl_struct_field whose row_major flag could be read here.
          */
         recursion(t->fields.array, name, new_length,
                   row_major, record_type);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else {
      this->visit_field(t, *name, row_major, record_type);
   }
}

void
program_resource_visitor::visit_field(const glsl_type *type, const char *name,
                                      bool row_major,
                                      const glsl_type *record_type)
{
   visit_field(type, name, row_major);
}

void
program_resource_visitor::visit_field(const glsl_struct_field *field)
{
   (void) field;
   /* empty */
}

/**
 * Class to help calculate the storage requirements for a set of uniforms
 *
 * As uniforms are added to the active set the number of active uniforms and
 * the storage requirements for those uniforms are accumulated.  The active
 * uniforms are added to the hash table supplied to the constructor.
 *
 * If the same uniform is added multiple times (i.e., once for each shader
 * target), it will only be counted once.
 */
class count_uniform_size : public program_resource_visitor {
public:
   count_uniform_size(struct string_to_uint_map *map)
      : num_active_uniforms(0), num_values(0), num_shader_samplers(0),
        num_shader_uniform_components(0), is_ubo_var(false), map(map)
   {
      /* empty */
   }

   void start_shader()
   {
      this->num_shader_samplers = 0;
      this->num_shader_uniform_components = 0;
   }

   void process(ir_variable *var)
   {
      this->is_ubo_var = var->is_in_uniform_block();
      if (var->is_interface_instance())
         program_resource_visitor::process(var->interface_type,
                                           var->interface_type->name);
      else
         program_resource_visitor::process(var);
   }

   /**
    * Total number of active uniforms counted
    */
   unsigned num_active_uniforms;

   /**
    * Number of data values required to back the storage for the active
    * uniforms
    */
   unsigned num_values;

   /**
    * Number of samplers used
    */
   unsigned num_shader_samplers;

   /**
    * Number of uniform components used in the current shader
    */
   unsigned num_shader_uniform_components;

   bool is_ubo_var;

private:
   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      assert(!type->is_record());
      assert(!(type->is_array() && type->fields.array->is_record()));
      assert(!type->is_interface());
      assert(!(type->is_array() && type->fields.array->is_interface()));

      (void) row_major;

      /* Count the number of samplers regardless of whether the uniform is
       * already in the hash table.  The hash table prevents adding the same
       * uniform for multiple shader targets, but in this case we want to
       * count it for each shader target.
       */
      const unsigned values = values_for_type(type);
      if (type->contains_sampler()) {
         this->num_shader_samplers +=
            type->is_array() ? type->array_size() : 1;
      } else {
         /* Accumulate the total number of uniform slots used by this shader.
          * Note that samplers do not count against this limit because they
          * don't use any storage on current hardware.
          */
         if (!is_ubo_var)
            this->num_shader_uniform_components += values;
      }

      /* If the uniform is already in the map, there's nothing more to do.
       */
      unsigned id;
      if (this->map->get(id, name))
         return;

      this->map->put(this->num_active_uniforms, name);

      /* Each leaf uniform occupies one entry in the list of active
       * uniforms.
       */
      this->num_active_uniforms++;
      this->num_values += values;
   }

   struct string_to_uint_map *map;
};

/**
 * Class to help parcel out pieces of backing storage to uniforms
 *
 * Each uniform processed has some range of the \c gl_constant_value
 * structures associated with it.  The association is done by finding
 * the uniform in the \c string_to_uint_map and using the value from
 * the map to connect that slot in the \c gl_uniform_storage table
 * with the next available slot in the \c gl_constant_value array.
 *
 * \warning
 * This class assumes that every uniform that will be processed is
 * already in the \c string_to_uint_map.  In addition, it assumes that
 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
 * enough."
 */
class parcel_out_uniform_storage : public program_resource_visitor {
public:
   parcel_out_uniform_storage(struct string_to_uint_map *map,
                              struct gl_uniform_storage *uniforms,
                              union gl_constant_value *values)
      : map(map), uniforms(uniforms), values(values)
   {
   }

   void start_shader(gl_shader_type shader_type)
   {
      assert(shader_type < MESA_SHADER_TYPES);
      this->shader_type = shader_type;

      this->shader_samplers_used = 0;
      this->shader_shadow_samplers = 0;
      this->next_sampler = 0;
      memset(this->targets, 0, sizeof(this->targets));
   }

   void set_and_process(struct gl_shader_program *prog,
                        ir_variable *var)
   {
      ubo_block_index = -1;
      if (var->is_in_uniform_block()) {
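         /* An interface block array is backed by per-element blocks named
          * "BlockName[0]", "BlockName[1]", and so on, so match any linked
          * block whose name is the interface name followed by '['.
          */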
         if (var->is_interface_instance() && var->type->is_array()) {
            unsigned l = strlen(var->interface_type->name);

            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strncmp(var->interface_type->name,
                           prog->UniformBlocks[i].Name,
                           l) == 0
                   && prog->UniformBlocks[i].Name[l] == '[') {
                  ubo_block_index = i;
                  break;
               }
            }
         } else {
            for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
               if (strcmp(var->interface_type->name,
                          prog->UniformBlocks[i].Name) == 0) {
                  ubo_block_index = i;
                  break;
               }
            }
         }
         assert(ubo_block_index != -1);

         /* Uniform blocks that were specified with an instance name must be
          * handled a little bit differently.  The name of the variable is the
          * name used to reference the uniform block instead of being the name
          * of a variable within the block.  Therefore, searching for the name
          * within the block will fail.
          */
         if (var->is_interface_instance()) {
            ubo_byte_offset = 0;
            ubo_row_major = false;
         } else {
            const struct gl_uniform_block *const block =
               &prog->UniformBlocks[ubo_block_index];

            assert(var->location != -1);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[var->location];

            ubo_row_major = ubo_var->RowMajor;
            ubo_byte_offset = ubo_var->Offset;
         }

         if (var->is_interface_instance())
            process(var->interface_type, var->interface_type->name);
         else
            process(var);
      } else
         process(var);
   }

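   /* State for the uniform block (if any) that contains the variable
    * currently being processed: the index of the block, the byte offset at
    * which the next field will be placed, and whether its matrices are
    * row-major.
    */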
   int ubo_block_index;
   int ubo_byte_offset;
   bool ubo_row_major;
   gl_shader_type shader_type;

private:
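   /**
    * Assign consecutive sampler units to a sampler (or sampler array)
    * uniform and record the texture target and shadow-sampler usage for
    * each unit in the current shader.
    */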
   void handle_samplers(const glsl_type *base_type,
                        struct gl_uniform_storage *uniform)
   {
      if (base_type->is_sampler()) {
         uniform->sampler[shader_type].index = this->next_sampler;
         uniform->sampler[shader_type].active = true;

         /* Increment the sampler by 1 for non-arrays and by the number of
          * array elements for arrays.
          */
         this->next_sampler +=
            MAX2(1, uniform->array_elements);

         const gl_texture_index target = base_type->sampler_index();
         const unsigned shadow = base_type->sampler_shadow;
         for (unsigned i = uniform->sampler[shader_type].index;
              i < MIN2(this->next_sampler, MAX_SAMPLERS);
              i++) {
            this->targets[i] = target;
            this->shader_samplers_used |= 1U << i;
            this->shader_shadow_samplers |= shadow << i;
         }
      } else {
         uniform->sampler[shader_type].index = ~0;
         uniform->sampler[shader_type].active = false;
      }
   }

   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      (void) type;
      (void) name;
      (void) row_major;
      assert(!"Should not get here.");
   }

   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major, const glsl_type *record_type)
   {
      assert(!type->is_record());
      assert(!(type->is_array() && type->fields.array->is_record()));
      assert(!type->is_interface());
      assert(!(type->is_array() && type->fields.array->is_interface()));

      (void) row_major;

      unsigned id;
      bool found = this->map->get(id, name);
      assert(found);

      if (!found)
         return;

      const glsl_type *base_type;
      if (type->is_array()) {
         this->uniforms[id].array_elements = type->length;
         base_type = type->fields.array;
      } else {
         this->uniforms[id].array_elements = 0;
         base_type = type;
      }

      /* This assigns sampler uniforms to sampler units. */
      handle_samplers(base_type, &this->uniforms[id]);

      /* If there is already storage associated with this uniform, it means
       * that it was set while processing an earlier shader stage.  For
       * example, we may be processing the uniform in the fragment shader, but
       * the uniform was already processed in the vertex shader.
       */
      if (this->uniforms[id].storage != NULL) {
         return;
      }

      this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
      this->uniforms[id].type = base_type;
      this->uniforms[id].initialized = 0;
      this->uniforms[id].num_driver_storage = 0;
      this->uniforms[id].driver_storage = NULL;
      this->uniforms[id].storage = this->values;
      if (this->ubo_block_index != -1) {
         this->uniforms[id].block_index = this->ubo_block_index;

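         /* The std140 base alignment of a record also applies to the
          * record's first field, so when this is the first leaf-field of a
          * record (record_type is non-NULL) use the container record's
          * alignment rather than the field's own alignment.
          */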
         const unsigned alignment = record_type
            ? record_type->std140_base_alignment(ubo_row_major)
            : type->std140_base_alignment(ubo_row_major);
         this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
         this->uniforms[id].offset = this->ubo_byte_offset;
         this->ubo_byte_offset += type->std140_size(ubo_row_major);

         if (type->is_array()) {
            this->uniforms[id].array_stride =
               glsl_align(type->fields.array->std140_size(ubo_row_major), 16);
         } else {
            this->uniforms[id].array_stride = 0;
         }

         if (type->is_matrix() ||
             (type->is_array() && type->fields.array->is_matrix())) {
            this->uniforms[id].matrix_stride = 16;
            this->uniforms[id].row_major = ubo_row_major;
         } else {
            this->uniforms[id].matrix_stride = 0;
            this->uniforms[id].row_major = false;
         }
      } else {
         this->uniforms[id].block_index = -1;
         this->uniforms[id].offset = -1;
         this->uniforms[id].array_stride = -1;
         this->uniforms[id].matrix_stride = -1;
         this->uniforms[id].row_major = false;
      }

      this->values += values_for_type(type);
   }

   struct string_to_uint_map *map;

   struct gl_uniform_storage *uniforms;
   unsigned next_sampler;

public:
   union gl_constant_value *values;

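   /**
    * Texture target (e.g., TEXTURE_2D_INDEX) for each sampler unit assigned
    * by \c handle_samplers.
    */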
   gl_texture_index targets[MAX_SAMPLERS];

   /**
    * Mask of samplers used by the current shader stage.
    */
   unsigned shader_samplers_used;

   /**
    * Mask of samplers used by the current shader stage for shadows.
    */
   unsigned shader_shadow_samplers;
};

/**
 * Merges a uniform block into an array of uniform blocks that may or
 * may not already contain a copy of it.
 *
 * Returns the index of the block in the array, or -1 if the new block is
 * incompatible with a block of the same name that is already in the array.
 */
int
link_cross_validate_uniform_block(void *mem_ctx,
                                  struct gl_uniform_block **linked_blocks,
                                  unsigned int *num_linked_blocks,
                                  struct gl_uniform_block *new_block)
{
   for (unsigned int i = 0; i < *num_linked_blocks; i++) {
      struct gl_uniform_block *old_block = &(*linked_blocks)[i];

      if (strcmp(old_block->Name, new_block->Name) == 0)
         return link_uniform_blocks_are_compatible(old_block, new_block)
            ? i : -1;
   }

   *linked_blocks = reralloc(mem_ctx, *linked_blocks,
                             struct gl_uniform_block,
                             *num_linked_blocks + 1);
   int linked_block_index = (*num_linked_blocks)++;
   struct gl_uniform_block *linked_block = &(*linked_blocks)[linked_block_index];

   memcpy(linked_block, new_block, sizeof(*new_block));
   linked_block->Uniforms = ralloc_array(*linked_blocks,
                                         struct gl_uniform_buffer_variable,
                                         linked_block->NumUniforms);

   memcpy(linked_block->Uniforms,
          new_block->Uniforms,
          sizeof(*linked_block->Uniforms) * linked_block->NumUniforms);

   for (unsigned int i = 0; i < linked_block->NumUniforms; i++) {
      struct gl_uniform_buffer_variable *ubo_var =
         &linked_block->Uniforms[i];

      if (ubo_var->Name == ubo_var->IndexName) {
         ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
         ubo_var->IndexName = ubo_var->Name;
      } else {
         ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
         ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName);
      }
   }

   return linked_block_index;
}

/**
 * Walks the IR and updates the references to uniform blocks in the
 * ir_variables to point at the linked shader's list (previously, they
 * would point at the uniform block list in one of the pre-linked
 * shaders).
 */
static void
link_update_uniform_buffer_variables(struct gl_shader *shader)
{
   foreach_list(node, shader->ir) {
      ir_variable *const var = ((ir_instruction *) node)->as_variable();

      if ((var == NULL) || !var->is_in_uniform_block())
         continue;

      assert(var->mode == ir_var_uniform);

      if (var->is_interface_instance()) {
         var->location = 0;
         continue;
      }

      bool found = false;
      char sentinel = '\0';

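      /* For a record, the names stored in the uniform block are of the form
       * "var.field", and for an array of records they are "var[i].field",
       * so compare the variable name only up to the '.' or '[' that follows
       * it.
       */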
      if (var->type->is_record()) {
         sentinel = '.';
      } else if (var->type->is_array()
                 && var->type->fields.array->is_record()) {
         sentinel = '[';
      }

      const unsigned l = strlen(var->name);
      for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
         for (unsigned j = 0; j < shader->UniformBlocks[i].NumUniforms; j++) {
            if (sentinel) {
               const char *begin = shader->UniformBlocks[i].Uniforms[j].Name;
               const char *end = strchr(begin, sentinel);

               if (end == NULL)
                  continue;

               if (l != (end - begin))
                  continue;

               if (strncmp(var->name, begin, l) == 0) {
                  found = true;
                  var->location = j;
                  break;
               }
            } else if (!strcmp(var->name,
                               shader->UniformBlocks[i].Uniforms[j].Name)) {
               found = true;
               var->location = j;
               break;
            }
         }
         if (found)
            break;
      }
      assert(found);
   }
}

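/**
 * Assign std140 offsets and sizes to the uniforms in each uniform block of
 * the shader, and compute the buffer size required by each block.
 *
 * As an illustration (offsets per the std140 rules cited below), a block
 * declared as
 *
 *     uniform Example {
 *        float f;   // base alignment  4, offset  0
 *        vec3  v;   // base alignment 16, offset 16
 *        mat4  m;   // base alignment 16, offset 32
 *     };
 *
 * would report a UNIFORM_BLOCK_DATA_SIZE of 96.
 */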
void
link_assign_uniform_block_offsets(struct gl_shader *shader)
{
   for (unsigned b = 0; b < shader->NumUniformBlocks; b++) {
      struct gl_uniform_block *block = &shader->UniformBlocks[b];

      unsigned offset = 0;
      for (unsigned int i = 0; i < block->NumUniforms; i++) {
         struct gl_uniform_buffer_variable *ubo_var = &block->Uniforms[i];
         const struct glsl_type *type = ubo_var->Type;

         unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor);
         unsigned size = type->std140_size(ubo_var->RowMajor);

         offset = glsl_align(offset, alignment);
         ubo_var->Offset = offset;
         offset += size;
      }

      /* From the GL_ARB_uniform_buffer_object spec:
       *
       *     "For uniform blocks laid out according to [std140] rules,
       *     the minimum buffer object size returned by the
       *     UNIFORM_BLOCK_DATA_SIZE query is derived by taking the
       *     offset of the last basic machine unit consumed by the
       *     last uniform of the uniform block (including any
       *     end-of-array or end-of-structure padding), adding one,
       *     and rounding up to the next multiple of the base
       *     alignment required for a vec4."
       */
      block->UniformBufferSize = glsl_align(offset, 16);
   }
}

void
link_assign_uniform_locations(struct gl_shader_program *prog)
{
   ralloc_free(prog->UniformStorage);
   prog->UniformStorage = NULL;
   prog->NumUserUniformStorage = 0;

   if (prog->UniformHash != NULL) {
      prog->UniformHash->clear();
   } else {
      prog->UniformHash = new string_to_uint_map;
   }

   /* First pass: Count the uniform resources used by the user-defined
    * uniforms.  While this happens, each active uniform will have an index
    * assigned to it.
    *
    * Note: this is *NOT* the index that is returned to the application by
    * glGetUniformLocation.
    */
   count_uniform_size uniform_size(prog->UniformHash);
   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      struct gl_shader *sh = prog->_LinkedShaders[i];

      if (sh == NULL)
         continue;

      /* Uniforms that lack an initializer in the shader code have an initial
       * value of zero.  This includes sampler uniforms.
       *
       * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
       *
       *     "The link time initial value is either the value of the
       *     variable's initializer, if present, or 0 if no initializer is
       *     present.  Sampler types cannot have initializers."
       */
      memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));

      link_update_uniform_buffer_variables(sh);

      /* Reset various per-shader target counts.
       */
      uniform_size.start_shader();

      foreach_list(node, sh->ir) {
         ir_variable *const var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (strncmp("gl_", var->name, 3) == 0) {
            uniform_size.num_shader_uniform_components +=
               var->type->component_slots();
            continue;
         }

         uniform_size.process(var);
      }

      sh->num_samplers = uniform_size.num_shader_samplers;
      sh->num_uniform_components = uniform_size.num_shader_uniform_components;

      sh->num_combined_uniform_components = sh->num_uniform_components;
      for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
         sh->num_combined_uniform_components +=
            sh->UniformBlocks[i].UniformBufferSize / 4;
      }
   }

   const unsigned num_user_uniforms = uniform_size.num_active_uniforms;
   const unsigned num_data_slots = uniform_size.num_values;

   /* On the outside chance that there were no uniforms, bail out.
    */
   if (num_user_uniforms == 0)
      return;

   struct gl_uniform_storage *uniforms =
      rzalloc_array(prog, struct gl_uniform_storage, num_user_uniforms);
   union gl_constant_value *data =
      rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
#ifndef NDEBUG
   union gl_constant_value *data_end = &data[num_data_slots];
#endif

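   /* Second pass: parcel out backing storage to each of the active uniforms
    * counted above.
    */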
   parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data);

   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      parcel.start_shader((gl_shader_type)i);

      foreach_list(node, prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform))
            continue;

         /* FINISHME: Update code to process built-in uniforms!
          */
         if (strncmp("gl_", var->name, 3) == 0)
            continue;

         parcel.set_and_process(prog, var);
      }

      prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
      prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;

      STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
                    sizeof(parcel.targets));
      memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
             sizeof(prog->_LinkedShaders[i]->SamplerTargets));
   }

   /* Determine the size of the largest uniform array queryable via
    * glGetUniformLocation.  Using this as the location scale guarantees that
    * there is enough "room" for the array index to be stored in the low
    * order part of the uniform location.  It also makes the locations be
    * more tightly packed.
    */
   unsigned max_array_size = 1;
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      if (uniforms[i].array_elements > max_array_size)
         max_array_size = uniforms[i].array_elements;
   }

   prog->UniformLocationBaseScale = max_array_size;

#ifndef NDEBUG
   for (unsigned i = 0; i < num_user_uniforms; i++) {
      assert(uniforms[i].storage != NULL);
   }

   assert(parcel.values == data_end);
#endif

   prog->NumUserUniformStorage = num_user_uniforms;
   prog->UniformStorage = uniforms;

   link_set_uniform_initializers(prog);

   return;
}