linker: Slight code rearrange to prevent duplication in the next commit
[mesa.git] / src / compiler / glsl / link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program.h"
30 #include "util/string_to_uint_map.h"
31
32 /**
33 * \file link_uniforms.cpp
34 * Assign locations for GLSL uniforms.
35 *
36 * \author Ian Romanick <ian.d.romanick@intel.com>
37 */
38
39 /**
40 * Used by linker to indicate uniforms that have no location set.
41 */
42 #define UNMAPPED_UNIFORM_LOC ~0u
43
44 /**
45 * Count the backing storage requirements for a type
46 */
47 static unsigned
48 values_for_type(const glsl_type *type)
49 {
50 if (type->is_sampler()) {
51 return 1;
52 } else if (type->is_array() && type->fields.array->is_sampler()) {
53 return type->array_size();
54 } else {
55 return type->component_slots();
56 }
57 }
58
59 void
60 program_resource_visitor::process(const glsl_type *type, const char *name)
61 {
62 assert(type->without_array()->is_record()
63 || type->without_array()->is_interface());
64
65 unsigned record_array_count = 1;
66 char *name_copy = ralloc_strdup(NULL, name);
67 enum glsl_interface_packing packing = type->get_interface_packing();
68
69 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
70 record_array_count, NULL);
71 ralloc_free(name_copy);
72 }
73
74 void
75 program_resource_visitor::process(ir_variable *var)
76 {
77 unsigned record_array_count = 1;
78 const bool row_major =
79 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
80
81 const enum glsl_interface_packing packing = var->get_interface_type() ?
82 var->get_interface_type_packing() :
83 var->type->get_interface_packing();
84
85 const glsl_type *t =
86 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
87 const glsl_type *t_without_array = t->without_array();
88
89 /* false is always passed for the row_major parameter to the other
90 * processing functions because no information is available to do
91 * otherwise. See the warning in linker.h.
92 */
93 if (t_without_array->is_record() ||
94 (t->is_array() && t->fields.array->is_array())) {
95 char *name = ralloc_strdup(NULL, var->name);
96 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
97 false, record_array_count, NULL);
98 ralloc_free(name);
99 } else if (t_without_array->is_interface()) {
100 char *name = ralloc_strdup(NULL, t_without_array->name);
101 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
102 &t_without_array->
103 fields.structure[t_without_array->field_index(var->name)] : NULL;
104
105 recursion(t, &name, strlen(name), row_major, NULL, packing,
106 false, record_array_count, ifc_member);
107 ralloc_free(name);
108 } else {
109 this->set_record_array_count(record_array_count);
110 this->visit_field(t, var->name, row_major, NULL, packing, false);
111 }
112 }
113
114 void
115 program_resource_visitor::recursion(const glsl_type *t, char **name,
116 size_t name_length, bool row_major,
117 const glsl_type *record_type,
118 const enum glsl_interface_packing packing,
119 bool last_field,
120 unsigned record_array_count,
121 const glsl_struct_field *named_ifc_member)
122 {
123 /* Records need to have each field processed individually.
124 *
125 * Arrays of records need to have each array element processed
126 * individually, then each field of the resulting array elements processed
127 * individually.
128 */
129 if (t->is_interface() && named_ifc_member) {
130 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
131 named_ifc_member->name);
132 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
133 packing, false, record_array_count, NULL);
134 } else if (t->is_record() || t->is_interface()) {
135 if (record_type == NULL && t->is_record())
136 record_type = t;
137
138 if (t->is_record())
139 this->enter_record(t, *name, row_major, packing);
140
141 for (unsigned i = 0; i < t->length; i++) {
142 const char *field = t->fields.structure[i].name;
143 size_t new_length = name_length;
144
145 if (t->fields.structure[i].type->is_record())
146 this->visit_field(&t->fields.structure[i]);
147
148 if (t->is_interface() && t->fields.structure[i].offset != -1)
149 this->set_buffer_offset(t->fields.structure[i].offset);
150
151 /* Append '.field' to the current variable name. */
152 if (name_length == 0) {
153 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
154 } else {
155 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
156 }
157
158 /* The layout of structures at the top level of the block is set
159 * during parsing. For matrices contained in multiple levels of
160 * structures in the block, the inner structures have no layout.
161 * These cases must potentially inherit the layout from the outer
162 * levels.
163 */
164 bool field_row_major = row_major;
165 const enum glsl_matrix_layout matrix_layout =
166 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
167 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
168 field_row_major = true;
169 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
170 field_row_major = false;
171 }
172
173 recursion(t->fields.structure[i].type, name, new_length,
174 field_row_major,
175 record_type,
176 packing,
177 (i + 1) == t->length, record_array_count, NULL);
178
179 /* Only the first leaf-field of the record gets called with the
180 * record type pointer.
181 */
182 record_type = NULL;
183 }
184
185 if (t->is_record()) {
186 (*name)[name_length] = '\0';
187 this->leave_record(t, *name, row_major, packing);
188 }
189 } else if (t->without_array()->is_record() ||
190 t->without_array()->is_interface() ||
191 (t->is_array() && t->fields.array->is_array())) {
192 if (record_type == NULL && t->fields.array->is_record())
193 record_type = t->fields.array;
194
195 unsigned length = t->length;
196 /* Shader storage block unsized arrays: add subscript [0] to variable
197 * names */
198 if (t->is_unsized_array())
199 length = 1;
200
201 record_array_count *= length;
202
203 for (unsigned i = 0; i < length; i++) {
204 size_t new_length = name_length;
205
206 /* Append the subscript to the current variable name */
207 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
208
209 recursion(t->fields.array, name, new_length, row_major,
210 record_type,
211 packing,
212 (i + 1) == t->length, record_array_count,
213 named_ifc_member);
214
215 /* Only the first leaf-field of the record gets called with the
216 * record type pointer.
217 */
218 record_type = NULL;
219 }
220 } else {
221 this->set_record_array_count(record_array_count);
222 this->visit_field(t, *name, row_major, record_type, packing, last_field);
223 }
224 }
225
226 void
227 program_resource_visitor::visit_field(const glsl_type *type, const char *name,
228 bool row_major,
229 const glsl_type *,
230 const enum glsl_interface_packing,
231 bool /* last_field */)
232 {
233 visit_field(type, name, row_major);
234 }
235
236 void
237 program_resource_visitor::visit_field(const glsl_struct_field *)
238 {
239 }
240
241 void
242 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
243 const enum glsl_interface_packing)
244 {
245 }
246
247 void
248 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
249 const enum glsl_interface_packing)
250 {
251 }
252
253 void
254 program_resource_visitor::set_buffer_offset(unsigned)
255 {
256 }
257
258 void
259 program_resource_visitor::set_record_array_count(unsigned)
260 {
261 }
262
263 namespace {
264
265 /**
266 * Class to help calculate the storage requirements for a set of uniforms
267 *
268 * As uniforms are added to the active set the number of active uniforms and
269 * the storage requirements for those uniforms are accumulated. The active
270 * uniforms are added to the hash table supplied to the constructor.
271 *
272 * If the same uniform is added multiple times (i.e., once for each shader
273 * target), it will only be accounted once.
274 */
275 class count_uniform_size : public program_resource_visitor {
276 public:
277 count_uniform_size(struct string_to_uint_map *map,
278 struct string_to_uint_map *hidden_map)
279 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
280 num_shader_samplers(0), num_shader_images(0),
281 num_shader_uniform_components(0), num_shader_subroutines(0),
282 is_buffer_block(false), is_shader_storage(false), map(map),
283 hidden_map(hidden_map)
284 {
285 /* empty */
286 }
287
288 void start_shader()
289 {
290 this->num_shader_samplers = 0;
291 this->num_shader_images = 0;
292 this->num_shader_uniform_components = 0;
293 this->num_shader_subroutines = 0;
294 }
295
296 void process(ir_variable *var)
297 {
298 this->current_var = var;
299 this->is_buffer_block = var->is_in_buffer_block();
300 this->is_shader_storage = var->is_in_shader_storage_block();
301 if (var->is_interface_instance())
302 program_resource_visitor::process(var->get_interface_type(),
303 var->get_interface_type()->name);
304 else
305 program_resource_visitor::process(var);
306 }
307
308 /**
309 * Total number of active uniforms counted
310 */
311 unsigned num_active_uniforms;
312
313 unsigned num_hidden_uniforms;
314
315 /**
316 * Number of data values required to back the storage for the active uniforms
317 */
318 unsigned num_values;
319
320 /**
321 * Number of samplers used
322 */
323 unsigned num_shader_samplers;
324
325 /**
326 * Number of images used
327 */
328 unsigned num_shader_images;
329
330 /**
331 * Number of uniforms used in the current shader
332 */
333 unsigned num_shader_uniform_components;
334
335 /**
336 * Number of subroutine uniforms used
337 */
338 unsigned num_shader_subroutines;
339
340 bool is_buffer_block;
341 bool is_shader_storage;
342
343 struct string_to_uint_map *map;
344
345 private:
346 virtual void visit_field(const glsl_type *type, const char *name,
347 bool /* row_major */)
348 {
349 assert(!type->without_array()->is_record());
350 assert(!type->without_array()->is_interface());
351 assert(!(type->is_array() && type->fields.array->is_array()));
352
353 /* Count the number of samplers regardless of whether the uniform is
354 * already in the hash table. The hash table prevents adding the same
355 * uniform for multiple shader targets, but in this case we want to
356 * count it for each shader target.
357 */
358 const unsigned values = values_for_type(type);
359 if (type->contains_subroutine()) {
360 this->num_shader_subroutines += values;
361 } else if (type->contains_sampler()) {
362 this->num_shader_samplers += values;
363 } else if (type->contains_image()) {
364 this->num_shader_images += values;
365
366 /* As drivers are likely to represent image uniforms as
367 * scalar indices, count them against the limit of uniform
368 * components in the default block. The spec allows image
369 * uniforms to use up no more than one scalar slot.
370 */
371 if (!is_shader_storage)
372 this->num_shader_uniform_components += values;
373 } else {
374 /* Accumulate the total number of uniform slots used by this shader.
375 * Note that samplers do not count against this limit because they
376 * don't use any storage on current hardware.
377 */
378 if (!is_buffer_block)
379 this->num_shader_uniform_components += values;
380 }
381
382 /* If the uniform is already in the map, there's nothing more to do.
383 */
384 unsigned id;
385 if (this->map->get(id, name))
386 return;
387
388 if (this->current_var->data.how_declared == ir_var_hidden) {
389 this->hidden_map->put(this->num_hidden_uniforms, name);
390 this->num_hidden_uniforms++;
391 } else {
392 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
393 name);
394 }
395
396 /* Each leaf uniform occupies one entry in the list of active
397 * uniforms.
398 */
399 this->num_active_uniforms++;
400
401 if(!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
402 this->num_values += values;
403 }
404
405 struct string_to_uint_map *hidden_map;
406
407 /**
408 * Current variable being processed.
409 */
410 ir_variable *current_var;
411 };
412
413 } /* anonymous namespace */
414
415 /**
416 * Class to help parcel out pieces of backing storage to uniforms
417 *
418 * Each uniform processed has some range of the \c gl_constant_value
419 * structures associated with it. The association is done by finding
420 * the uniform in the \c string_to_uint_map and using the value from
421 * the map to connect that slot in the \c gl_uniform_storage table
422 * with the next available slot in the \c gl_constant_value array.
423 *
424 * \warning
425 * This class assumes that every uniform that will be processed is
426 * already in the \c string_to_uint_map. In addition, it assumes that
427 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
428 * enough."
429 */
430 class parcel_out_uniform_storage : public program_resource_visitor {
431 public:
432 parcel_out_uniform_storage(struct gl_shader_program *prog,
433 struct string_to_uint_map *map,
434 struct gl_uniform_storage *uniforms,
435 union gl_constant_value *values)
436 : prog(prog), map(map), uniforms(uniforms), values(values)
437 {
438 }
439
440 void start_shader(gl_shader_stage shader_type)
441 {
442 assert(shader_type < MESA_SHADER_STAGES);
443 this->shader_type = shader_type;
444
445 this->shader_samplers_used = 0;
446 this->shader_shadow_samplers = 0;
447 this->next_sampler = 0;
448 this->next_image = 0;
449 this->next_subroutine = 0;
450 this->record_array_count = 1;
451 memset(this->targets, 0, sizeof(this->targets));
452 }
453
454 void set_and_process(ir_variable *var)
455 {
456 current_var = var;
457 field_counter = 0;
458 this->record_next_sampler = new string_to_uint_map;
459
460 buffer_block_index = -1;
461 if (var->is_in_buffer_block()) {
462 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
463 prog->ShaderStorageBlocks : prog->UniformBlocks;
464 unsigned num_blks = var->is_in_shader_storage_block() ?
465 prog->NumShaderStorageBlocks : prog->NumUniformBlocks;
466
467 if (var->is_interface_instance() && var->type->is_array()) {
468 unsigned l = strlen(var->get_interface_type()->name);
469
470 for (unsigned i = 0; i < num_blks; i++) {
471 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
472 == 0 && blks[i].Name[l] == '[') {
473 buffer_block_index = i;
474 break;
475 }
476 }
477 } else {
478 for (unsigned i = 0; i < num_blks; i++) {
479 if (strcmp(var->get_interface_type()->name, blks[i].Name) ==
480 0) {
481 buffer_block_index = i;
482 break;
483 }
484 }
485 }
486 assert(buffer_block_index != -1);
487
488 /* Uniform blocks that were specified with an instance name must be
489 * handled a little bit differently. The name of the variable is the
490 * name used to reference the uniform block instead of being the name
491 * of a variable within the block. Therefore, searching for the name
492 * within the block will fail.
493 */
494 if (var->is_interface_instance()) {
495 ubo_byte_offset = 0;
496 process(var->get_interface_type(),
497 var->get_interface_type()->name);
498 } else {
499 const struct gl_uniform_block *const block =
500 &blks[buffer_block_index];
501
502 assert(var->data.location != -1);
503
504 const struct gl_uniform_buffer_variable *const ubo_var =
505 &block->Uniforms[var->data.location];
506
507 ubo_byte_offset = ubo_var->Offset;
508 process(var);
509 }
510 } else {
511 /* Store any explicit location and reset data location so we can
512 * reuse this variable for storing the uniform slot number.
513 */
514 this->explicit_location = current_var->data.location;
515 current_var->data.location = -1;
516
517 process(var);
518 }
519 delete this->record_next_sampler;
520 }
521
522 int buffer_block_index;
523 int ubo_byte_offset;
524 gl_shader_stage shader_type;
525
526 private:
527 void handle_samplers(const glsl_type *base_type,
528 struct gl_uniform_storage *uniform, const char *name)
529 {
530 if (base_type->is_sampler()) {
531 uniform->opaque[shader_type].active = true;
532
533 /* Handle multiple samplers inside struct arrays */
534 if (this->record_array_count > 1) {
535 unsigned inner_array_size = MAX2(1, uniform->array_elements);
536 char *name_copy = ralloc_strdup(NULL, name);
537
538 /* Remove all array subscripts from the sampler name */
539 char *str_start;
540 const char *str_end;
541 while((str_start = strchr(name_copy, '[')) &&
542 (str_end = strchr(name_copy, ']'))) {
543 memmove(str_start, str_end + 1, 1 + strlen(str_end));
544 }
545
546 unsigned index = 0;
547 if (this->record_next_sampler->get(index, name_copy)) {
548 /* In this case, we've already seen this uniform so we just use
549 * the next sampler index recorded the last time we visited.
550 */
551 uniform->opaque[shader_type].index = index;
552 index = inner_array_size + uniform->opaque[shader_type].index;
553 this->record_next_sampler->put(index, name_copy);
554
555 ralloc_free(name_copy);
556 /* Return as everything else has already been initialised in a
557 * previous pass.
558 */
559 return;
560 } else {
561 /* We've never seen this uniform before so we need to allocate
562 * enough indices to store it.
563 *
564 * Nested struct arrays behave like arrays of arrays so we need
565 * to increase the index by the total number of elements of the
566 * sampler in case there is more than one sampler inside the
567 * structs. This allows the offset to be easily calculated for
568 * indirect indexing.
569 */
570 uniform->opaque[shader_type].index = this->next_sampler;
571 this->next_sampler +=
572 inner_array_size * this->record_array_count;
573
574 /* Store the next index for future passes over the struct array
575 */
576 index = uniform->opaque[shader_type].index + inner_array_size;
577 this->record_next_sampler->put(index, name_copy);
578 ralloc_free(name_copy);
579 }
580 } else {
581 /* Increment the sampler by 1 for non-arrays and by the number of
582 * array elements for arrays.
583 */
584 uniform->opaque[shader_type].index = this->next_sampler;
585 this->next_sampler += MAX2(1, uniform->array_elements);
586 }
587
588 const gl_texture_index target = base_type->sampler_index();
589 const unsigned shadow = base_type->sampler_shadow;
590 for (unsigned i = uniform->opaque[shader_type].index;
591 i < MIN2(this->next_sampler, MAX_SAMPLERS);
592 i++) {
593 this->targets[i] = target;
594 this->shader_samplers_used |= 1U << i;
595 this->shader_shadow_samplers |= shadow << i;
596 }
597 }
598 }
599
600 void handle_images(const glsl_type *base_type,
601 struct gl_uniform_storage *uniform)
602 {
603 if (base_type->is_image()) {
604 uniform->opaque[shader_type].index = this->next_image;
605 uniform->opaque[shader_type].active = true;
606
607 /* Set image access qualifiers */
608 const GLenum access =
609 (current_var->data.image_read_only ? GL_READ_ONLY :
610 current_var->data.image_write_only ? GL_WRITE_ONLY :
611 GL_READ_WRITE);
612
613 const unsigned first = this->next_image;
614
615 /* Increment the image index by 1 for non-arrays and by the
616 * number of array elements for arrays.
617 */
618 this->next_image += MAX2(1, uniform->array_elements);
619
620 for (unsigned i = first; i < MIN2(next_image, MAX_IMAGE_UNIFORMS); i++)
621 prog->_LinkedShaders[shader_type]->ImageAccess[i] = access;
622 }
623 }
624
625 void handle_subroutines(const glsl_type *base_type,
626 struct gl_uniform_storage *uniform)
627 {
628 if (base_type->is_subroutine()) {
629 uniform->opaque[shader_type].index = this->next_subroutine;
630 uniform->opaque[shader_type].active = true;
631
632 /* Increment the subroutine index by 1 for non-arrays and by the
633 * number of array elements for arrays.
634 */
635 this->next_subroutine += MAX2(1, uniform->array_elements);
636
637 }
638 }
639
640 virtual void set_buffer_offset(unsigned offset)
641 {
642 this->ubo_byte_offset = offset;
643 }
644
645 virtual void set_record_array_count(unsigned record_array_count)
646 {
647 this->record_array_count = record_array_count;
648 }
649
650 virtual void visit_field(const glsl_type *, const char *,
651 bool /* row_major */)
652 {
653 assert(!"Should not get here.");
654 }
655
656 virtual void enter_record(const glsl_type *type, const char *,
657 bool row_major,
658 const enum glsl_interface_packing packing)
659 {
660 assert(type->is_record());
661 if (this->buffer_block_index == -1)
662 return;
663 if (packing == GLSL_INTERFACE_PACKING_STD430)
664 this->ubo_byte_offset = glsl_align(
665 this->ubo_byte_offset, type->std430_base_alignment(row_major));
666 else
667 this->ubo_byte_offset = glsl_align(
668 this->ubo_byte_offset, type->std140_base_alignment(row_major));
669 }
670
671 virtual void leave_record(const glsl_type *type, const char *,
672 bool row_major,
673 const enum glsl_interface_packing packing)
674 {
675 assert(type->is_record());
676 if (this->buffer_block_index == -1)
677 return;
678 if (packing == GLSL_INTERFACE_PACKING_STD430)
679 this->ubo_byte_offset = glsl_align(
680 this->ubo_byte_offset, type->std430_base_alignment(row_major));
681 else
682 this->ubo_byte_offset = glsl_align(
683 this->ubo_byte_offset, type->std140_base_alignment(row_major));
684 }
685
686 virtual void visit_field(const glsl_type *type, const char *name,
687 bool row_major, const glsl_type * /* record_type */,
688 const enum glsl_interface_packing packing,
689 bool /* last_field */)
690 {
691 assert(!type->without_array()->is_record());
692 assert(!type->without_array()->is_interface());
693 assert(!(type->is_array() && type->fields.array->is_array()));
694
695 unsigned id;
696 bool found = this->map->get(id, name);
697 assert(found);
698
699 if (!found)
700 return;
701
702 const glsl_type *base_type;
703 if (type->is_array()) {
704 this->uniforms[id].array_elements = type->length;
705 base_type = type->fields.array;
706 } else {
707 this->uniforms[id].array_elements = 0;
708 base_type = type;
709 }
710
711 /* Initialise opaque data */
712 this->uniforms[id].opaque[shader_type].index = ~0;
713 this->uniforms[id].opaque[shader_type].active = false;
714
715 /* This assigns uniform indices to sampler and image uniforms. */
716 handle_samplers(base_type, &this->uniforms[id], name);
717 handle_images(base_type, &this->uniforms[id]);
718 handle_subroutines(base_type, &this->uniforms[id]);
719
720 /* For array of arrays or struct arrays the base location may have
721 * already been set so don't set it again.
722 */
723 if (buffer_block_index == -1 && current_var->data.location == -1) {
724 current_var->data.location = id;
725 }
726
727 /* If there is already storage associated with this uniform or if the
728 * uniform is set as builtin, it means that it was set while processing
729 * an earlier shader stage. For example, we may be processing the
730 * uniform in the fragment shader, but the uniform was already processed
731 * in the vertex shader.
732 */
733 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
734 return;
735 }
736
737 /* Assign explicit locations. */
738 if (current_var->data.explicit_location) {
739 /* Set sequential locations for struct fields. */
740 if (current_var->type->without_array()->is_record() ||
741 current_var->type->is_array_of_arrays()) {
742 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
743 this->uniforms[id].remap_location =
744 this->explicit_location + field_counter;
745 field_counter += entries;
746 } else {
747 this->uniforms[id].remap_location = this->explicit_location;
748 }
749 } else {
750 /* Initialize to to indicate that no location is set */
751 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
752 }
753
754 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
755 this->uniforms[id].type = base_type;
756 this->uniforms[id].num_driver_storage = 0;
757 this->uniforms[id].driver_storage = NULL;
758 this->uniforms[id].atomic_buffer_index = -1;
759 this->uniforms[id].hidden =
760 current_var->data.how_declared == ir_var_hidden;
761 this->uniforms[id].builtin = is_gl_identifier(name);
762
763 this->uniforms[id].is_shader_storage =
764 current_var->is_in_shader_storage_block();
765
766 /* Do not assign storage if the uniform is a builtin or buffer object */
767 if (!this->uniforms[id].builtin &&
768 !this->uniforms[id].is_shader_storage &&
769 this->buffer_block_index == -1)
770 this->uniforms[id].storage = this->values;
771
772 if (this->buffer_block_index != -1) {
773 this->uniforms[id].block_index = this->buffer_block_index;
774
775 unsigned alignment = type->std140_base_alignment(row_major);
776 if (packing == GLSL_INTERFACE_PACKING_STD430)
777 alignment = type->std430_base_alignment(row_major);
778 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
779 this->uniforms[id].offset = this->ubo_byte_offset;
780 if (packing == GLSL_INTERFACE_PACKING_STD430)
781 this->ubo_byte_offset += type->std430_size(row_major);
782 else
783 this->ubo_byte_offset += type->std140_size(row_major);
784
785 if (type->is_array()) {
786 if (packing == GLSL_INTERFACE_PACKING_STD430)
787 this->uniforms[id].array_stride =
788 type->without_array()->std430_array_stride(row_major);
789 else
790 this->uniforms[id].array_stride =
791 glsl_align(type->without_array()->std140_size(row_major),
792 16);
793 } else {
794 this->uniforms[id].array_stride = 0;
795 }
796
797 if (type->without_array()->is_matrix()) {
798 const glsl_type *matrix = type->without_array();
799 const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4;
800 const unsigned items =
801 row_major ? matrix->matrix_columns : matrix->vector_elements;
802
803 assert(items <= 4);
804 if (packing == GLSL_INTERFACE_PACKING_STD430)
805 this->uniforms[id].matrix_stride = items < 3 ? items * N :
806 glsl_align(items * N, 16);
807 else
808 this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
809 this->uniforms[id].row_major = row_major;
810 } else {
811 this->uniforms[id].matrix_stride = 0;
812 this->uniforms[id].row_major = false;
813 }
814 } else {
815 this->uniforms[id].block_index = -1;
816 this->uniforms[id].offset = -1;
817 this->uniforms[id].array_stride = -1;
818 this->uniforms[id].matrix_stride = -1;
819 this->uniforms[id].row_major = false;
820 }
821
822 if (!this->uniforms[id].builtin &&
823 !this->uniforms[id].is_shader_storage &&
824 this->buffer_block_index == -1)
825 this->values += values_for_type(type);
826 }
827
828 /**
829 * Current program being processed.
830 */
831 struct gl_shader_program *prog;
832
833 struct string_to_uint_map *map;
834
835 struct gl_uniform_storage *uniforms;
836 unsigned next_sampler;
837 unsigned next_image;
838 unsigned next_subroutine;
839
840 /**
841 * Field counter is used to take care that uniform structures
842 * with explicit locations get sequential locations.
843 */
844 unsigned field_counter;
845
846 /**
847 * Current variable being processed.
848 */
849 ir_variable *current_var;
850
851 /* Used to store the explicit location from current_var so that we can
852 * reuse the location field for storing the uniform slot id.
853 */
854 int explicit_location;
855
856 /* Stores total struct array elements including nested structs */
857 unsigned record_array_count;
858
859 /* Map for temporarily storing next sampler index when handling samplers in
860 * struct arrays.
861 */
862 struct string_to_uint_map *record_next_sampler;
863
864 public:
865 union gl_constant_value *values;
866
867 gl_texture_index targets[MAX_SAMPLERS];
868
869 /**
870 * Mask of samplers used by the current shader stage.
871 */
872 unsigned shader_samplers_used;
873
874 /**
875 * Mask of samplers used by the current shader stage for shadows.
876 */
877 unsigned shader_shadow_samplers;
878 };
879
880 /**
881 * Walks the IR and update the references to uniform blocks in the
882 * ir_variables to point at linked shader's list (previously, they
883 * would point at the uniform block list in one of the pre-linked
884 * shaders).
885 */
886 static void
887 link_update_uniform_buffer_variables(struct gl_linked_shader *shader)
888 {
889 foreach_in_list(ir_instruction, node, shader->ir) {
890 ir_variable *const var = node->as_variable();
891
892 if (var == NULL || !var->is_in_buffer_block())
893 continue;
894
895 assert(var->data.mode == ir_var_uniform ||
896 var->data.mode == ir_var_shader_storage);
897
898 if (var->is_interface_instance()) {
899 var->data.location = 0;
900 continue;
901 }
902
903 bool found = false;
904 char sentinel = '\0';
905
906 if (var->type->is_record()) {
907 sentinel = '.';
908 } else if (var->type->is_array() && (var->type->fields.array->is_array()
909 || var->type->without_array()->is_record())) {
910 sentinel = '[';
911 }
912
913 unsigned num_blocks = var->data.mode == ir_var_uniform ?
914 shader->NumUniformBlocks : shader->NumShaderStorageBlocks;
915 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
916 shader->UniformBlocks : shader->ShaderStorageBlocks;
917
918 const unsigned l = strlen(var->name);
919 for (unsigned i = 0; i < num_blocks; i++) {
920 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
921 if (sentinel) {
922 const char *begin = blks[i]->Uniforms[j].Name;
923 const char *end = strchr(begin, sentinel);
924
925 if (end == NULL)
926 continue;
927
928 if ((ptrdiff_t) l != (end - begin))
929 continue;
930
931 found = strncmp(var->name, begin, l) == 0;
932 } else {
933 found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
934 }
935
936 if (found) {
937 var->data.location = j;
938 break;
939 }
940 }
941
942 if (found)
943 break;
944 }
945 assert(found);
946 }
947 }
948
949 /**
950 * Combine the hidden uniform hash map with the uniform hash map so that the
951 * hidden uniforms will be given indicies at the end of the uniform storage
952 * array.
953 */
954 static void
955 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
956 void *closure)
957 {
958 count_uniform_size *uniform_size = (count_uniform_size *) closure;
959 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
960 uniform_size->num_hidden_uniforms;
961
962 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
963 }
964
965 /**
966 * Search through the list of empty blocks to find one that fits the current
967 * uniform.
968 */
969 static int
970 find_empty_block(struct gl_shader_program *prog,
971 struct gl_uniform_storage *uniform)
972 {
973 const unsigned entries = MAX2(1, uniform->array_elements);
974
975 foreach_list_typed(struct empty_uniform_block, block, link,
976 &prog->EmptyUniformLocations) {
977 /* Found a block with enough slots to fit the uniform */
978 if (block->slots == entries) {
979 unsigned start = block->start;
980 exec_node_remove(&block->link);
981 ralloc_free(block);
982
983 return start;
984 /* Found a block with more slots than needed. It can still be used. */
985 } else if (block->slots > entries) {
986 unsigned start = block->start;
987 block->start += entries;
988 block->slots -= entries;
989
990 return start;
991 }
992 }
993
994 return -1;
995 }
996
997 static void
998 link_setup_uniform_remap_tables(struct gl_context *ctx,
999 struct gl_shader_program *prog,
1000 unsigned num_explicit_uniform_locs)
1001 {
1002 unsigned total_entries = num_explicit_uniform_locs;
1003 unsigned empty_locs =
1004 prog->NumUniformRemapTable - num_explicit_uniform_locs;
1005
1006 /* Reserve all the explicit locations of the active uniforms. */
1007 for (unsigned i = 0; i < prog->NumUniformStorage; i++) {
1008 if (prog->UniformStorage[i].type->is_subroutine() ||
1009 prog->UniformStorage[i].is_shader_storage)
1010 continue;
1011
1012 if (prog->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC) {
1013 /* How many new entries for this uniform? */
1014 const unsigned entries =
1015 MAX2(1, prog->UniformStorage[i].array_elements);
1016
1017 /* Set remap table entries point to correct gl_uniform_storage. */
1018 for (unsigned j = 0; j < entries; j++) {
1019 unsigned element_loc = prog->UniformStorage[i].remap_location + j;
1020 assert(prog->UniformRemapTable[element_loc] ==
1021 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1022 prog->UniformRemapTable[element_loc] = &prog->UniformStorage[i];
1023 }
1024 }
1025 }
1026
1027 /* Reserve locations for rest of the uniforms. */
1028 for (unsigned i = 0; i < prog->NumUniformStorage; i++) {
1029
1030 if (prog->UniformStorage[i].type->is_subroutine() ||
1031 prog->UniformStorage[i].is_shader_storage)
1032 continue;
1033
1034 /* Built-in uniforms should not get any location. */
1035 if (prog->UniformStorage[i].builtin)
1036 continue;
1037
1038 /* Explicit ones have been set already. */
1039 if (prog->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1040 continue;
1041
1042 /* how many new entries for this uniform? */
1043 const unsigned entries = MAX2(1, prog->UniformStorage[i].array_elements);
1044
1045 /* Find UniformRemapTable for empty blocks where we can fit this uniform. */
1046 int chosen_location = -1;
1047
1048 if (empty_locs)
1049 chosen_location = find_empty_block(prog, &prog->UniformStorage[i]);
1050
1051 /* Add new entries to the total amount of entries. */
1052 total_entries += entries;
1053
1054 if (chosen_location != -1) {
1055 empty_locs -= entries;
1056 } else {
1057 chosen_location = prog->NumUniformRemapTable;
1058
1059 /* resize remap table to fit new entries */
1060 prog->UniformRemapTable =
1061 reralloc(prog,
1062 prog->UniformRemapTable,
1063 gl_uniform_storage *,
1064 prog->NumUniformRemapTable + entries);
1065 prog->NumUniformRemapTable += entries;
1066 }
1067
1068 /* set pointers for this uniform */
1069 for (unsigned j = 0; j < entries; j++)
1070 prog->UniformRemapTable[chosen_location + j] =
1071 &prog->UniformStorage[i];
1072
1073 /* set the base location in remap table for the uniform */
1074 prog->UniformStorage[i].remap_location = chosen_location;
1075 }
1076
1077 /* Verify that total amount of entries for explicit and implicit locations
1078 * is less than MAX_UNIFORM_LOCATIONS.
1079 */
1080
1081 if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
1082 linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
1083 "(%u > %u)", total_entries,
1084 ctx->Const.MaxUserAssignableUniformLocations);
1085 }
1086
1087 /* Reserve all the explicit locations of the active subroutine uniforms. */
1088 for (unsigned i = 0; i < prog->NumUniformStorage; i++) {
1089 if (!prog->UniformStorage[i].type->is_subroutine())
1090 continue;
1091
1092 if (prog->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
1093 continue;
1094
1095 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1096 struct gl_linked_shader *sh = prog->_LinkedShaders[j];
1097 if (!sh)
1098 continue;
1099
1100 if (!prog->UniformStorage[i].opaque[j].active)
1101 continue;
1102
1103 /* How many new entries for this uniform? */
1104 const unsigned entries =
1105 MAX2(1, prog->UniformStorage[i].array_elements);
1106
1107 /* Set remap table entries point to correct gl_uniform_storage. */
1108 for (unsigned k = 0; k < entries; k++) {
1109 unsigned element_loc = prog->UniformStorage[i].remap_location + k;
1110 assert(sh->SubroutineUniformRemapTable[element_loc] ==
1111 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1112 sh->SubroutineUniformRemapTable[element_loc] =
1113 &prog->UniformStorage[i];
1114 }
1115 }
1116 }
1117
1118 /* reserve subroutine locations */
1119 for (unsigned i = 0; i < prog->NumUniformStorage; i++) {
1120
1121 if (!prog->UniformStorage[i].type->is_subroutine())
1122 continue;
1123 const unsigned entries = MAX2(1, prog->UniformStorage[i].array_elements);
1124
1125 if (prog->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1126 continue;
1127 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1128 struct gl_linked_shader *sh = prog->_LinkedShaders[j];
1129 if (!sh)
1130 continue;
1131
1132 if (!prog->UniformStorage[i].opaque[j].active)
1133 continue;
1134
1135 sh->SubroutineUniformRemapTable =
1136 reralloc(sh,
1137 sh->SubroutineUniformRemapTable,
1138 gl_uniform_storage *,
1139 sh->NumSubroutineUniformRemapTable + entries);
1140
1141 for (unsigned k = 0; k < entries; k++) {
1142 sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] =
1143 &prog->UniformStorage[i];
1144 }
1145 prog->UniformStorage[i].remap_location =
1146 sh->NumSubroutineUniformRemapTable;
1147 sh->NumSubroutineUniformRemapTable += entries;
1148 }
1149 }
1150 }
1151
/**
 * Allocate prog->UniformStorage and its backing constant-value array, walk
 * each linked stage's IR to fill the storage via parcel_out_uniform_storage,
 * then build the remap tables and apply the uniform initializers.
 *
 * \param num_data_slots           Number of gl_constant_value slots needed to
 *                                 back all uniforms (from the counting pass).
 * \param num_explicit_uniform_locs Locations claimed via explicit
 *                                 layout(location=...), forwarded to the
 *                                 remap-table setup.
 */
static void
link_assign_uniform_storage(struct gl_context *ctx,
                            struct gl_shader_program *prog,
                            const unsigned num_data_slots,
                            unsigned num_explicit_uniform_locs)
{
   /* On the outside chance that there were no uniforms, bail out.
    */
   if (prog->NumUniformStorage == 0)
      return;

   unsigned int boolean_true = ctx->Const.UniformBooleanTrue;

   /* The constant-value data is ralloc'ed off UniformStorage so both are
    * freed together.
    */
   prog->UniformStorage = rzalloc_array(prog, struct gl_uniform_storage,
                                        prog->NumUniformStorage);
   union gl_constant_value *data = rzalloc_array(prog->UniformStorage,
                                                 union gl_constant_value,
                                                 num_data_slots);
#ifndef NDEBUG
   /* Debug-only sentinel: after parceling, parcel.values must have consumed
    * exactly num_data_slots entries.
    */
   union gl_constant_value *data_end = &data[num_data_slots];
#endif

   parcel_out_uniform_storage parcel(prog, prog->UniformHash,
                                     prog->UniformStorage, data);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      parcel.start_shader((gl_shader_stage)i);

      /* Hand every uniform / shader-storage variable of this stage to the
       * parceling visitor, which assigns its storage and opaque indices.
       */
      foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform &&
                               var->data.mode != ir_var_shader_storage))
            continue;

         parcel.set_and_process(var);
      }

      /* Copy the per-stage sampler bookkeeping accumulated by the parceler
       * back onto the linked shader.
       */
      prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
      prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;

      STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
                    sizeof(parcel.targets));
      memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
             sizeof(prog->_LinkedShaders[i]->SamplerTargets));
   }

#ifndef NDEBUG
   /* Every uniform must either have storage or be one of the kinds that
    * legitimately has none (built-in, SSBO member, block member).
    */
   for (unsigned i = 0; i < prog->NumUniformStorage; i++) {
      assert(prog->UniformStorage[i].storage != NULL ||
             prog->UniformStorage[i].builtin ||
             prog->UniformStorage[i].is_shader_storage ||
             prog->UniformStorage[i].block_index != -1);
   }

   assert(parcel.values == data_end);
#endif

   link_setup_uniform_remap_tables(ctx, prog, num_explicit_uniform_locs);

   link_set_uniform_initializers(prog, boolean_true);
}
1217
1218 void
1219 link_assign_uniform_locations(struct gl_shader_program *prog,
1220 struct gl_context *ctx,
1221 unsigned int num_explicit_uniform_locs)
1222 {
1223 ralloc_free(prog->UniformStorage);
1224 prog->UniformStorage = NULL;
1225 prog->NumUniformStorage = 0;
1226
1227 if (prog->UniformHash != NULL) {
1228 prog->UniformHash->clear();
1229 } else {
1230 prog->UniformHash = new string_to_uint_map;
1231 }
1232
1233 /* First pass: Count the uniform resources used by the user-defined
1234 * uniforms. While this happens, each active uniform will have an index
1235 * assigned to it.
1236 *
1237 * Note: this is *NOT* the index that is returned to the application by
1238 * glGetUniformLocation.
1239 */
1240 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1241 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms);
1242 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1243 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1244
1245 if (sh == NULL)
1246 continue;
1247
1248 /* Uniforms that lack an initializer in the shader code have an initial
1249 * value of zero. This includes sampler uniforms.
1250 *
1251 * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
1252 *
1253 * "The link time initial value is either the value of the variable's
1254 * initializer, if present, or 0 if no initializer is present. Sampler
1255 * types cannot have initializers."
1256 */
1257 memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
1258 memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));
1259
1260 link_update_uniform_buffer_variables(sh);
1261
1262 /* Reset various per-shader target counts.
1263 */
1264 uniform_size.start_shader();
1265
1266 foreach_in_list(ir_instruction, node, sh->ir) {
1267 ir_variable *const var = node->as_variable();
1268
1269 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1270 var->data.mode != ir_var_shader_storage))
1271 continue;
1272
1273 uniform_size.process(var);
1274 }
1275
1276 sh->num_samplers = uniform_size.num_shader_samplers;
1277 sh->NumImages = uniform_size.num_shader_images;
1278 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1279 sh->num_combined_uniform_components = sh->num_uniform_components;
1280
1281 for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
1282 sh->num_combined_uniform_components +=
1283 sh->UniformBlocks[i]->UniformBufferSize / 4;
1284 }
1285 }
1286
1287 prog->NumUniformStorage = uniform_size.num_active_uniforms;
1288 prog->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
1289
1290 /* assign hidden uniforms a slot id */
1291 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1292 delete hiddenUniforms;
1293
1294 link_assign_uniform_storage(ctx, prog, uniform_size.num_values,
1295 num_explicit_uniform_locs);
1296 }