98ae3ad53718e37513cd91a9321e44462a24a027
[mesa.git] / src / compiler / glsl / link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program/hash_table.h"
30 #include "program.h"
31 #include "util/hash_table.h"
32
33 /**
34 * \file link_uniforms.cpp
35 * Assign locations for GLSL uniforms.
36 *
37 * \author Ian Romanick <ian.d.romanick@intel.com>
38 */
39
40 /**
41 * Used by linker to indicate uniforms that have no location set.
42 */
43 #define UNMAPPED_UNIFORM_LOC ~0u
44
45 /**
46 * Count the backing storage requirements for a type
47 */
48 static unsigned
49 values_for_type(const glsl_type *type)
50 {
51 if (type->is_sampler()) {
52 return 1;
53 } else if (type->is_array() && type->fields.array->is_sampler()) {
54 return type->array_size();
55 } else {
56 return type->component_slots();
57 }
58 }
59
60 void
61 program_resource_visitor::process(const glsl_type *type, const char *name)
62 {
63 assert(type->without_array()->is_record()
64 || type->without_array()->is_interface());
65
66 unsigned record_array_count = 1;
67 char *name_copy = ralloc_strdup(NULL, name);
68 enum glsl_interface_packing packing = type->get_interface_packing();
69
70 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
71 record_array_count, NULL);
72 ralloc_free(name_copy);
73 }
74
/**
 * Walk a uniform variable, dispatching on its type shape.
 *
 * Records, arrays-of-arrays, and interface blocks are expanded field by
 * field through recursion(); simple types go straight to visit_field().
 */
void
program_resource_visitor::process(ir_variable *var)
{
   unsigned record_array_count = 1;
   const bool row_major =
      var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;

   /* Prefer the packing declared on the variable's interface block, if it
    * has one; otherwise derive it from the variable's own type.
    */
   const enum glsl_interface_packing packing = var->get_interface_type() ?
      var->get_interface_type_packing() :
      var->type->get_interface_packing();

   /* Variables lowered out of a named interface block are walked via the
    * original interface type so that generated names match the block.
    */
   const glsl_type *t =
      var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
   const glsl_type *t_without_array = t->without_array();

   /* false is always passed for the row_major parameter to the other
    * processing functions because no information is available to do
    * otherwise. See the warning in linker.h.
    */
   if (t_without_array->is_record() ||
       (t->is_array() && t->fields.array->is_array())) {
      char *name = ralloc_strdup(NULL, var->name);
      recursion(var->type, &name, strlen(name), row_major, NULL, packing,
                false, record_array_count, NULL);
      ralloc_free(name);
   } else if (t_without_array->is_interface()) {
      /* Interface instances are named after the block type rather than the
       * variable.  For members lowered from a named block, also locate the
       * member's glsl_struct_field so recursion() can recover its name.
       */
      char *name = ralloc_strdup(NULL, t_without_array->name);
      const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
         &t_without_array->
            fields.structure[t_without_array->field_index(var->name)] : NULL;

      recursion(t, &name, strlen(name), row_major, NULL, packing,
                false, record_array_count, ifc_member);
      ralloc_free(name);
   } else {
      /* Simple (non-aggregate) uniform: visit the single leaf directly. */
      this->set_record_array_count(record_array_count);
      this->visit_field(t, var->name, row_major, NULL, packing, false);
   }
}
114
/**
 * Recursively expand an aggregate type into its leaf fields.
 *
 * The \c name buffer is extended in place ("." for struct members, "[i]"
 * for array elements) as the walk descends, and each leaf is reported via
 * visit_field().
 *
 * \param t                  Type currently being expanded.
 * \param name               In/out heap buffer holding the name built so far.
 * \param name_length        Length of the valid prefix in \c name.
 * \param row_major          Matrix layout inherited from the enclosing level.
 * \param record_type        Outermost record type; passed through so only the
 *                           first leaf of a record sees it non-NULL.
 * \param packing            Interface packing (std140/std430/...) in effect.
 * \param last_field         True when this is the final field of its parent.
 * \param record_array_count Product of enclosing struct-array lengths.
 * \param named_ifc_member   Single member to visit when walking a named
 *                           interface block member, or NULL.
 */
void
program_resource_visitor::recursion(const glsl_type *t, char **name,
                                    size_t name_length, bool row_major,
                                    const glsl_type *record_type,
                                    const enum glsl_interface_packing packing,
                                    bool last_field,
                                    unsigned record_array_count,
                                    const glsl_struct_field *named_ifc_member)
{
   /* Records need to have each field processed individually.
    *
    * Arrays of records need to have each array element processed
    * individually, then each field of the resulting array elements processed
    * individually.
    */
   if (t->is_interface() && named_ifc_member) {
      /* Named interface block member: visit only that one member. */
      ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
                                   named_ifc_member->name);
      recursion(named_ifc_member->type, name, name_length, row_major, NULL,
                packing, false, record_array_count, NULL);
   } else if (t->is_record() || t->is_interface()) {
      if (record_type == NULL && t->is_record())
         record_type = t;

      if (t->is_record())
         this->enter_record(t, *name, row_major, packing);

      for (unsigned i = 0; i < t->length; i++) {
         const char *field = t->fields.structure[i].name;
         size_t new_length = name_length;

         if (t->fields.structure[i].type->is_record())
            this->visit_field(&t->fields.structure[i]);

         /* Explicit offsets from the shader take precedence over the
          * running offset computed by the visitor.
          */
         if (t->is_interface() && t->fields.structure[i].offset != -1)
            this->set_buffer_offset(t->fields.structure[i].offset);

         /* Append '.field' to the current variable name. */
         if (name_length == 0) {
            ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
         } else {
            ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
         }

         /* The layout of structures at the top level of the block is set
          * during parsing.  For matrices contained in multiple levels of
          * structures in the block, the inner structures have no layout.
          * These cases must potentially inherit the layout from the outer
          * levels.
          */
         bool field_row_major = row_major;
         const enum glsl_matrix_layout matrix_layout =
            glsl_matrix_layout(t->fields.structure[i].matrix_layout);
         if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
            field_row_major = true;
         } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
            field_row_major = false;
         }

         recursion(t->fields.structure[i].type, name, new_length,
                   field_row_major,
                   record_type,
                   packing,
                   (i + 1) == t->length, record_array_count, NULL);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }

      if (t->is_record()) {
         /* Truncate the name back to the record itself before leaving. */
         (*name)[name_length] = '\0';
         this->leave_record(t, *name, row_major, packing);
      }
   } else if (t->without_array()->is_record() ||
              t->without_array()->is_interface() ||
              (t->is_array() && t->fields.array->is_array())) {
      if (record_type == NULL && t->fields.array->is_record())
         record_type = t->fields.array;

      unsigned length = t->length;
      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names */
      if (t->is_unsized_array())
         length = 1;

      record_array_count *= length;

      for (unsigned i = 0; i < length; i++) {
         size_t new_length = name_length;

         /* Append the subscript to the current variable name */
         ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);

         recursion(t->fields.array, name, new_length, row_major,
                   record_type,
                   packing,
                   (i + 1) == t->length, record_array_count,
                   named_ifc_member);

         /* Only the first leaf-field of the record gets called with the
          * record type pointer.
          */
         record_type = NULL;
      }
   } else {
      /* Leaf field: report it with all accumulated context. */
      this->set_record_array_count(record_array_count);
      this->visit_field(t, *name, row_major, record_type, packing, last_field);
   }
}
226
227 void
228 program_resource_visitor::visit_field(const glsl_type *type, const char *name,
229 bool row_major,
230 const glsl_type *,
231 const enum glsl_interface_packing,
232 bool /* last_field */)
233 {
234 visit_field(type, name, row_major);
235 }
236
237 void
238 program_resource_visitor::visit_field(const glsl_struct_field *field)
239 {
240 (void) field;
241 /* empty */
242 }
243
244 void
245 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
246 const enum glsl_interface_packing)
247 {
248 }
249
250 void
251 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
252 const enum glsl_interface_packing)
253 {
254 }
255
256 void
257 program_resource_visitor::set_buffer_offset(unsigned)
258 {
259 }
260
261 void
262 program_resource_visitor::set_record_array_count(unsigned)
263 {
264 }
265
namespace {

/**
 * Class to help calculate the storage requirements for a set of uniforms
 *
 * As uniforms are added to the active set the number of active uniforms and
 * the storage requirements for those uniforms are accumulated.  The active
 * uniforms are added to the hash table supplied to the constructor.
 *
 * If the same uniform is added multiple times (i.e., once for each shader
 * target), it will only be accounted once.
 */
class count_uniform_size : public program_resource_visitor {
public:
   count_uniform_size(struct string_to_uint_map *map,
                      struct string_to_uint_map *hidden_map)
      : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
        num_shader_samplers(0), num_shader_images(0),
        num_shader_uniform_components(0), num_shader_subroutines(0),
        is_buffer_block(false), is_shader_storage(false), map(map),
        hidden_map(hidden_map)
   {
      /* empty */
   }

   /* Reset the per-shader-stage counters; the cross-stage totals and the
    * hash tables are intentionally preserved.
    */
   void start_shader()
   {
      this->num_shader_samplers = 0;
      this->num_shader_images = 0;
      this->num_shader_uniform_components = 0;
      this->num_shader_subroutines = 0;
   }

   /* Count one uniform variable.  Interface instances are walked via their
    * interface type so the generated names match the block definition.
    */
   void process(ir_variable *var)
   {
      this->current_var = var;
      this->is_buffer_block = var->is_in_buffer_block();
      this->is_shader_storage = var->is_in_shader_storage_block();
      if (var->is_interface_instance())
         program_resource_visitor::process(var->get_interface_type(),
                                           var->get_interface_type()->name);
      else
         program_resource_visitor::process(var);
   }

   /**
    * Total number of active uniforms counted
    */
   unsigned num_active_uniforms;

   /* Number of uniforms declared hidden (counted within num_active_uniforms). */
   unsigned num_hidden_uniforms;

   /**
    * Number of data values required to back the storage for the active uniforms
    */
   unsigned num_values;

   /**
    * Number of samplers used
    */
   unsigned num_shader_samplers;

   /**
    * Number of images used
    */
   unsigned num_shader_images;

   /**
    * Number of uniforms used in the current shader
    */
   unsigned num_shader_uniform_components;

   /**
    * Number of subroutine uniforms used
    */
   unsigned num_shader_subroutines;

   /* True while processing a variable in a uniform or shader-storage block. */
   bool is_buffer_block;
   /* True while processing a variable in a shader-storage block. */
   bool is_shader_storage;

   /* Maps uniform name -> index for visible uniforms; shared across stages. */
   struct string_to_uint_map *map;

private:
   virtual void visit_field(const glsl_type *type, const char *name,
                            bool row_major)
   {
      assert(!type->without_array()->is_record());
      assert(!type->without_array()->is_interface());
      assert(!(type->is_array() && type->fields.array->is_array()));

      (void) row_major;

      /* Count the number of samplers regardless of whether the uniform is
       * already in the hash table.  The hash table prevents adding the same
       * uniform for multiple shader targets, but in this case we want to
       * count it for each shader target.
       */
      const unsigned values = values_for_type(type);
      if (type->contains_subroutine()) {
         this->num_shader_subroutines += values;
      } else if (type->contains_sampler()) {
         this->num_shader_samplers += values;
      } else if (type->contains_image()) {
         this->num_shader_images += values;

         /* As drivers are likely to represent image uniforms as
          * scalar indices, count them against the limit of uniform
          * components in the default block.  The spec allows image
          * uniforms to use up no more than one scalar slot.
          */
         if(!is_shader_storage)
            this->num_shader_uniform_components += values;
      } else {
         /* Accumulate the total number of uniform slots used by this shader.
          * Note that samplers do not count against this limit because they
          * don't use any storage on current hardware.
          */
         if (!is_buffer_block)
            this->num_shader_uniform_components += values;
      }

      /* If the uniform is already in the map, there's nothing more to do.
       */
      unsigned id;
      if (this->map->get(id, name))
         return;

      /* Hidden uniforms get indices in a separate map so they can later be
       * appended after all visible uniforms.
       */
      if (this->current_var->data.how_declared == ir_var_hidden) {
         this->hidden_map->put(this->num_hidden_uniforms, name);
         this->num_hidden_uniforms++;
      } else {
         this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
                        name);
      }

      /* Each leaf uniform occupies one entry in the list of active
       * uniforms.
       */
      this->num_active_uniforms++;

      /* Built-ins and shader-storage variables need no backing storage. */
      if(!is_gl_identifier(name) && !is_shader_storage)
         this->num_values += values;
   }

   /* Maps uniform name -> index for hidden uniforms only. */
   struct string_to_uint_map *hidden_map;

   /**
    * Current variable being processed.
    */
   ir_variable *current_var;
};

} /* anonymous namespace */
419
420 /**
421 * Class to help parcel out pieces of backing storage to uniforms
422 *
423 * Each uniform processed has some range of the \c gl_constant_value
424 * structures associated with it. The association is done by finding
425 * the uniform in the \c string_to_uint_map and using the value from
426 * the map to connect that slot in the \c gl_uniform_storage table
427 * with the next available slot in the \c gl_constant_value array.
428 *
429 * \warning
430 * This class assumes that every uniform that will be processed is
431 * already in the \c string_to_uint_map. In addition, it assumes that
432 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
433 * enough."
434 */
435 class parcel_out_uniform_storage : public program_resource_visitor {
436 public:
437 parcel_out_uniform_storage(struct gl_shader_program *prog,
438 struct string_to_uint_map *map,
439 struct gl_uniform_storage *uniforms,
440 union gl_constant_value *values)
441 : prog(prog), map(map), uniforms(uniforms), values(values)
442 {
443 }
444
445 void start_shader(gl_shader_stage shader_type)
446 {
447 assert(shader_type < MESA_SHADER_STAGES);
448 this->shader_type = shader_type;
449
450 this->shader_samplers_used = 0;
451 this->shader_shadow_samplers = 0;
452 this->next_sampler = 0;
453 this->next_image = 0;
454 this->next_subroutine = 0;
455 this->record_array_count = 1;
456 memset(this->targets, 0, sizeof(this->targets));
457 }
458
459 void set_and_process(ir_variable *var)
460 {
461 current_var = var;
462 field_counter = 0;
463 this->record_next_sampler = new string_to_uint_map;
464
465 buffer_block_index = -1;
466 if (var->is_in_buffer_block()) {
467 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
468 prog->ShaderStorageBlocks : prog->UniformBlocks;
469 unsigned num_blks = var->is_in_shader_storage_block() ?
470 prog->NumShaderStorageBlocks : prog->NumUniformBlocks;
471
472 if (var->is_interface_instance() && var->type->is_array()) {
473 unsigned l = strlen(var->get_interface_type()->name);
474
475 for (unsigned i = 0; i < num_blks; i++) {
476 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
477 == 0 && blks[i].Name[l] == '[') {
478 buffer_block_index = i;
479 break;
480 }
481 }
482 } else {
483 for (unsigned i = 0; i < num_blks; i++) {
484 if (strcmp(var->get_interface_type()->name, blks[i].Name) ==
485 0) {
486 buffer_block_index = i;
487 break;
488 }
489 }
490 }
491 assert(buffer_block_index != -1);
492
493 /* Uniform blocks that were specified with an instance name must be
494 * handled a little bit differently. The name of the variable is the
495 * name used to reference the uniform block instead of being the name
496 * of a variable within the block. Therefore, searching for the name
497 * within the block will fail.
498 */
499 if (var->is_interface_instance()) {
500 ubo_byte_offset = 0;
501 process(var->get_interface_type(),
502 var->get_interface_type()->name);
503 } else {
504 const struct gl_uniform_block *const block =
505 &blks[buffer_block_index];
506
507 assert(var->data.location != -1);
508
509 const struct gl_uniform_buffer_variable *const ubo_var =
510 &block->Uniforms[var->data.location];
511
512 ubo_byte_offset = ubo_var->Offset;
513 process(var);
514 }
515 } else {
516 /* Store any explicit location and reset data location so we can
517 * reuse this variable for storing the uniform slot number.
518 */
519 this->explicit_location = current_var->data.location;
520 current_var->data.location = -1;
521
522 process(var);
523 }
524 delete this->record_next_sampler;
525 }
526
527 int buffer_block_index;
528 int ubo_byte_offset;
529 gl_shader_stage shader_type;
530
531 private:
532 void handle_samplers(const glsl_type *base_type,
533 struct gl_uniform_storage *uniform, const char *name)
534 {
535 if (base_type->is_sampler()) {
536 uniform->opaque[shader_type].active = true;
537
538 /* Handle multiple samplers inside struct arrays */
539 if (this->record_array_count > 1) {
540 unsigned inner_array_size = MAX2(1, uniform->array_elements);
541 char *name_copy = ralloc_strdup(NULL, name);
542
543 /* Remove all array subscripts from the sampler name */
544 char *str_start;
545 const char *str_end;
546 while((str_start = strchr(name_copy, '[')) &&
547 (str_end = strchr(name_copy, ']'))) {
548 memmove(str_start, str_end + 1, 1 + strlen(str_end));
549 }
550
551 unsigned index = 0;
552 if (this->record_next_sampler->get(index, name_copy)) {
553 /* In this case, we've already seen this uniform so we just use
554 * the next sampler index recorded the last time we visited.
555 */
556 uniform->opaque[shader_type].index = index;
557 index = inner_array_size + uniform->opaque[shader_type].index;
558 this->record_next_sampler->put(index, name_copy);
559
560 ralloc_free(name_copy);
561 /* Return as everything else has already been initialised in a
562 * previous pass.
563 */
564 return;
565 } else {
566 /* We've never seen this uniform before so we need to allocate
567 * enough indices to store it.
568 *
569 * Nested struct arrays behave like arrays of arrays so we need
570 * to increase the index by the total number of elements of the
571 * sampler in case there is more than one sampler inside the
572 * structs. This allows the offset to be easily calculated for
573 * indirect indexing.
574 */
575 uniform->opaque[shader_type].index = this->next_sampler;
576 this->next_sampler +=
577 inner_array_size * this->record_array_count;
578
579 /* Store the next index for future passes over the struct array
580 */
581 index = uniform->opaque[shader_type].index + inner_array_size;
582 this->record_next_sampler->put(index, name_copy);
583 ralloc_free(name_copy);
584 }
585 } else {
586 /* Increment the sampler by 1 for non-arrays and by the number of
587 * array elements for arrays.
588 */
589 uniform->opaque[shader_type].index = this->next_sampler;
590 this->next_sampler += MAX2(1, uniform->array_elements);
591 }
592
593 const gl_texture_index target = base_type->sampler_index();
594 const unsigned shadow = base_type->sampler_shadow;
595 for (unsigned i = uniform->opaque[shader_type].index;
596 i < MIN2(this->next_sampler, MAX_SAMPLERS);
597 i++) {
598 this->targets[i] = target;
599 this->shader_samplers_used |= 1U << i;
600 this->shader_shadow_samplers |= shadow << i;
601 }
602 }
603 }
604
605 void handle_images(const glsl_type *base_type,
606 struct gl_uniform_storage *uniform)
607 {
608 if (base_type->is_image()) {
609 uniform->opaque[shader_type].index = this->next_image;
610 uniform->opaque[shader_type].active = true;
611
612 /* Set image access qualifiers */
613 const GLenum access =
614 (current_var->data.image_read_only ? GL_READ_ONLY :
615 current_var->data.image_write_only ? GL_WRITE_ONLY :
616 GL_READ_WRITE);
617
618 const unsigned first = this->next_image;
619
620 /* Increment the image index by 1 for non-arrays and by the
621 * number of array elements for arrays.
622 */
623 this->next_image += MAX2(1, uniform->array_elements);
624
625 for (unsigned i = first; i < MIN2(next_image, MAX_IMAGE_UNIFORMS); i++)
626 prog->_LinkedShaders[shader_type]->ImageAccess[i] = access;
627 }
628 }
629
630 void handle_subroutines(const glsl_type *base_type,
631 struct gl_uniform_storage *uniform)
632 {
633 if (base_type->is_subroutine()) {
634 uniform->opaque[shader_type].index = this->next_subroutine;
635 uniform->opaque[shader_type].active = true;
636
637 /* Increment the subroutine index by 1 for non-arrays and by the
638 * number of array elements for arrays.
639 */
640 this->next_subroutine += MAX2(1, uniform->array_elements);
641
642 }
643 }
644
645 virtual void set_buffer_offset(unsigned offset)
646 {
647 this->ubo_byte_offset = offset;
648 }
649
650 virtual void set_record_array_count(unsigned record_array_count)
651 {
652 this->record_array_count = record_array_count;
653 }
654
655 virtual void visit_field(const glsl_type *type, const char *name,
656 bool row_major)
657 {
658 (void) type;
659 (void) name;
660 (void) row_major;
661 assert(!"Should not get here.");
662 }
663
664 virtual void enter_record(const glsl_type *type, const char *,
665 bool row_major, const enum glsl_interface_packing packing) {
666 assert(type->is_record());
667 if (this->buffer_block_index == -1)
668 return;
669 if (packing == GLSL_INTERFACE_PACKING_STD430)
670 this->ubo_byte_offset = glsl_align(
671 this->ubo_byte_offset, type->std430_base_alignment(row_major));
672 else
673 this->ubo_byte_offset = glsl_align(
674 this->ubo_byte_offset, type->std140_base_alignment(row_major));
675 }
676
677 virtual void leave_record(const glsl_type *type, const char *,
678 bool row_major, const enum glsl_interface_packing packing) {
679 assert(type->is_record());
680 if (this->buffer_block_index == -1)
681 return;
682 if (packing == GLSL_INTERFACE_PACKING_STD430)
683 this->ubo_byte_offset = glsl_align(
684 this->ubo_byte_offset, type->std430_base_alignment(row_major));
685 else
686 this->ubo_byte_offset = glsl_align(
687 this->ubo_byte_offset, type->std140_base_alignment(row_major));
688 }
689
690 virtual void visit_field(const glsl_type *type, const char *name,
691 bool row_major, const glsl_type * /* record_type */,
692 const enum glsl_interface_packing packing,
693 bool /* last_field */)
694 {
695 assert(!type->without_array()->is_record());
696 assert(!type->without_array()->is_interface());
697 assert(!(type->is_array() && type->fields.array->is_array()));
698
699 unsigned id;
700 bool found = this->map->get(id, name);
701 assert(found);
702
703 if (!found)
704 return;
705
706 const glsl_type *base_type;
707 if (type->is_array()) {
708 this->uniforms[id].array_elements = type->length;
709 base_type = type->fields.array;
710 } else {
711 this->uniforms[id].array_elements = 0;
712 base_type = type;
713 }
714
715 /* Initialise opaque data */
716 this->uniforms[id].opaque[shader_type].index = ~0;
717 this->uniforms[id].opaque[shader_type].active = false;
718
719 /* This assigns uniform indices to sampler and image uniforms. */
720 handle_samplers(base_type, &this->uniforms[id], name);
721 handle_images(base_type, &this->uniforms[id]);
722 handle_subroutines(base_type, &this->uniforms[id]);
723
724 /* For array of arrays or struct arrays the base location may have
725 * already been set so don't set it again.
726 */
727 if (buffer_block_index == -1 && current_var->data.location == -1) {
728 current_var->data.location = id;
729 }
730
731 /* If there is already storage associated with this uniform or if the
732 * uniform is set as builtin, it means that it was set while processing
733 * an earlier shader stage. For example, we may be processing the
734 * uniform in the fragment shader, but the uniform was already processed
735 * in the vertex shader.
736 */
737 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
738 return;
739 }
740
741 /* Assign explicit locations. */
742 if (current_var->data.explicit_location) {
743 /* Set sequential locations for struct fields. */
744 if (current_var->type->without_array()->is_record() ||
745 current_var->type->is_array_of_arrays()) {
746 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
747 this->uniforms[id].remap_location =
748 this->explicit_location + field_counter;
749 field_counter += entries;
750 } else {
751 this->uniforms[id].remap_location = this->explicit_location;
752 }
753 } else {
754 /* Initialize to to indicate that no location is set */
755 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
756 }
757
758 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
759 this->uniforms[id].type = base_type;
760 this->uniforms[id].num_driver_storage = 0;
761 this->uniforms[id].driver_storage = NULL;
762 this->uniforms[id].atomic_buffer_index = -1;
763 this->uniforms[id].hidden =
764 current_var->data.how_declared == ir_var_hidden;
765 this->uniforms[id].builtin = is_gl_identifier(name);
766
767 this->uniforms[id].is_shader_storage =
768 current_var->is_in_shader_storage_block();
769
770 /* Do not assign storage if the uniform is builtin */
771 if (!this->uniforms[id].builtin &&
772 !this->uniforms[id].is_shader_storage)
773 this->uniforms[id].storage = this->values;
774
775 if (this->buffer_block_index != -1) {
776 this->uniforms[id].block_index = this->buffer_block_index;
777
778 unsigned alignment = type->std140_base_alignment(row_major);
779 if (packing == GLSL_INTERFACE_PACKING_STD430)
780 alignment = type->std430_base_alignment(row_major);
781 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
782 this->uniforms[id].offset = this->ubo_byte_offset;
783 if (packing == GLSL_INTERFACE_PACKING_STD430)
784 this->ubo_byte_offset += type->std430_size(row_major);
785 else
786 this->ubo_byte_offset += type->std140_size(row_major);
787
788 if (type->is_array()) {
789 if (packing == GLSL_INTERFACE_PACKING_STD430)
790 this->uniforms[id].array_stride =
791 type->without_array()->std430_array_stride(row_major);
792 else
793 this->uniforms[id].array_stride =
794 glsl_align(type->without_array()->std140_size(row_major),
795 16);
796 } else {
797 this->uniforms[id].array_stride = 0;
798 }
799
800 if (type->without_array()->is_matrix()) {
801 const glsl_type *matrix = type->without_array();
802 const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4;
803 const unsigned items =
804 row_major ? matrix->matrix_columns : matrix->vector_elements;
805
806 assert(items <= 4);
807 if (packing == GLSL_INTERFACE_PACKING_STD430)
808 this->uniforms[id].matrix_stride = items < 3 ? items * N :
809 glsl_align(items * N, 16);
810 else
811 this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
812 this->uniforms[id].row_major = row_major;
813 } else {
814 this->uniforms[id].matrix_stride = 0;
815 this->uniforms[id].row_major = false;
816 }
817 } else {
818 this->uniforms[id].block_index = -1;
819 this->uniforms[id].offset = -1;
820 this->uniforms[id].array_stride = -1;
821 this->uniforms[id].matrix_stride = -1;
822 this->uniforms[id].row_major = false;
823 }
824
825 if (!this->uniforms[id].builtin &&
826 !this->uniforms[id].is_shader_storage)
827 this->values += values_for_type(type);
828 }
829
830 /**
831 * Current program being processed.
832 */
833 struct gl_shader_program *prog;
834
835 struct string_to_uint_map *map;
836
837 struct gl_uniform_storage *uniforms;
838 unsigned next_sampler;
839 unsigned next_image;
840 unsigned next_subroutine;
841
842 /**
843 * Field counter is used to take care that uniform structures
844 * with explicit locations get sequential locations.
845 */
846 unsigned field_counter;
847
848 /**
849 * Current variable being processed.
850 */
851 ir_variable *current_var;
852
853 /* Used to store the explicit location from current_var so that we can
854 * reuse the location field for storing the uniform slot id.
855 */
856 int explicit_location;
857
858 /* Stores total struct array elements including nested structs */
859 unsigned record_array_count;
860
861 /* Map for temporarily storing next sampler index when handling samplers in
862 * struct arrays.
863 */
864 struct string_to_uint_map *record_next_sampler;
865
866 public:
867 union gl_constant_value *values;
868
869 gl_texture_index targets[MAX_SAMPLERS];
870
871 /**
872 * Mask of samplers used by the current shader stage.
873 */
874 unsigned shader_samplers_used;
875
876 /**
877 * Mask of samplers used by the current shader stage for shadows.
878 */
879 unsigned shader_shadow_samplers;
880 };
881
882 /**
883 * Walks the IR and update the references to uniform blocks in the
884 * ir_variables to point at linked shader's list (previously, they
885 * would point at the uniform block list in one of the pre-linked
886 * shaders).
887 */
/* Rewrite each block member's data.location to be the index of its entry in
 * the linked shader's block Uniforms list.  Members are matched by name,
 * using a sentinel character ('.' for struct members, '[' for arrayed
 * members) to compare only the variable-name prefix of qualified entries.
 */
static void
link_update_uniform_buffer_variables(struct gl_shader *shader)
{
   foreach_in_list(ir_instruction, node, shader->ir) {
      ir_variable *const var = node->as_variable();

      if ((var == NULL) || !var->is_in_buffer_block())
         continue;

      assert(var->data.mode == ir_var_uniform ||
             var->data.mode == ir_var_shader_storage);

      /* Interface instances reference the whole block, not a member. */
      if (var->is_interface_instance()) {
         var->data.location = 0;
         continue;
      }

      bool found = false;
      char sentinel = '\0';

      /* Qualified entries for this variable continue past its name with
       * either a field access or an array subscript; pick the separator to
       * expect, or '\0' for an exact whole-name match.
       */
      if (var->type->is_record()) {
         sentinel = '.';
      } else if (var->type->is_array() && (var->type->fields.array->is_array()
                 || var->type->without_array()->is_record())) {
         sentinel = '[';
      }

      unsigned num_blocks = var->data.mode == ir_var_uniform ?
         shader->NumUniformBlocks : shader->NumShaderStorageBlocks;
      struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
         shader->UniformBlocks : shader->ShaderStorageBlocks;

      const unsigned l = strlen(var->name);
      for (unsigned i = 0; i < num_blocks; i++) {
         for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
            if (sentinel) {
               const char *begin = blks[i]->Uniforms[j].Name;
               const char *end = strchr(begin, sentinel);

               if (end == NULL)
                  continue;

               if ((ptrdiff_t) l != (end - begin))
                  continue;

               if (strncmp(var->name, begin, l) == 0) {
                  found = true;
                  var->data.location = j;
                  break;
               }
            } else if (!strcmp(var->name, blks[i]->Uniforms[j].Name)) {
               found = true;
               var->data.location = j;
               break;
            }
         }
         if (found)
            break;
      }
      assert(found);
   }
}
950
/**
 * Combine the hidden uniform hash map with the uniform hash map so that the
 * hidden uniforms will be given indices at the end of the uniform storage
 * array.
 */
956 static void
957 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
958 void *closure)
959 {
960 count_uniform_size *uniform_size = (count_uniform_size *) closure;
961 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
962 uniform_size->num_hidden_uniforms;
963
964 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
965 }
966
967 /**
968 * Search through the list of empty blocks to find one that fits the current
969 * uniform.
970 */
971 static int
972 find_empty_block(struct gl_shader_program *prog,
973 struct gl_uniform_storage *uniform)
974 {
975 const unsigned entries = MAX2(1, uniform->array_elements);
976
977 foreach_list_typed(struct empty_uniform_block, block, link,
978 &prog->EmptyUniformLocations) {
979 /* Found a block with enough slots to fit the uniform */
980 if (block->slots == entries) {
981 unsigned start = block->start;
982 exec_node_remove(&block->link);
983 ralloc_free(block);
984
985 return start;
986 /* Found a block with more slots than needed. It can still be used. */
987 } else if (block->slots > entries) {
988 unsigned start = block->start;
989 block->start += entries;
990 block->slots -= entries;
991
992 return start;
993 }
994 }
995
996 return -1;
997 }
998
999 void
1000 link_assign_uniform_locations(struct gl_shader_program *prog,
1001 unsigned int boolean_true,
1002 unsigned int num_explicit_uniform_locs,
1003 unsigned int max_uniform_locs)
1004 {
1005 ralloc_free(prog->UniformStorage);
1006 prog->UniformStorage = NULL;
1007 prog->NumUniformStorage = 0;
1008
1009 if (prog->UniformHash != NULL) {
1010 prog->UniformHash->clear();
1011 } else {
1012 prog->UniformHash = new string_to_uint_map;
1013 }
1014
1015 /* First pass: Count the uniform resources used by the user-defined
1016 * uniforms. While this happens, each active uniform will have an index
1017 * assigned to it.
1018 *
1019 * Note: this is *NOT* the index that is returned to the application by
1020 * glGetUniformLocation.
1021 */
1022 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1023 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms);
1024 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1025 struct gl_shader *sh = prog->_LinkedShaders[i];
1026
1027 if (sh == NULL)
1028 continue;
1029
1030 /* Uniforms that lack an initializer in the shader code have an initial
1031 * value of zero. This includes sampler uniforms.
1032 *
1033 * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
1034 *
1035 * "The link time initial value is either the value of the variable's
1036 * initializer, if present, or 0 if no initializer is present. Sampler
1037 * types cannot have initializers."
1038 */
1039 memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
1040 memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));
1041
1042 link_update_uniform_buffer_variables(sh);
1043
1044 /* Reset various per-shader target counts.
1045 */
1046 uniform_size.start_shader();
1047
1048 foreach_in_list(ir_instruction, node, sh->ir) {
1049 ir_variable *const var = node->as_variable();
1050
1051 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1052 var->data.mode != ir_var_shader_storage))
1053 continue;
1054
1055 uniform_size.process(var);
1056 }
1057
1058 sh->num_samplers = uniform_size.num_shader_samplers;
1059 sh->NumImages = uniform_size.num_shader_images;
1060 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1061 sh->num_combined_uniform_components = sh->num_uniform_components;
1062
1063 for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
1064 sh->num_combined_uniform_components +=
1065 sh->UniformBlocks[i]->UniformBufferSize / 4;
1066 }
1067 }
1068
1069 const unsigned num_uniforms = uniform_size.num_active_uniforms;
1070 const unsigned num_data_slots = uniform_size.num_values;
1071 const unsigned hidden_uniforms = uniform_size.num_hidden_uniforms;
1072
1073 /* assign hidden uniforms a slot id */
1074 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1075 delete hiddenUniforms;
1076
1077 /* On the outside chance that there were no uniforms, bail out.
1078 */
1079 if (num_uniforms == 0)
1080 return;
1081
1082 struct gl_uniform_storage *uniforms =
1083 rzalloc_array(prog, struct gl_uniform_storage, num_uniforms);
1084 union gl_constant_value *data =
1085 rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
1086 #ifndef NDEBUG
1087 union gl_constant_value *data_end = &data[num_data_slots];
1088 #endif
1089
1090 parcel_out_uniform_storage parcel(prog, prog->UniformHash, uniforms, data);
1091
1092 unsigned total_entries = num_explicit_uniform_locs;
1093 unsigned empty_locs = prog->NumUniformRemapTable - num_explicit_uniform_locs;
1094
1095 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1096 if (prog->_LinkedShaders[i] == NULL)
1097 continue;
1098
1099 parcel.start_shader((gl_shader_stage)i);
1100
1101 foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
1102 ir_variable *const var = node->as_variable();
1103
1104 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1105 var->data.mode != ir_var_shader_storage))
1106 continue;
1107
1108 parcel.set_and_process(var);
1109 }
1110
1111 prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
1112 prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;
1113
1114 STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
1115 sizeof(parcel.targets));
1116 memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
1117 sizeof(prog->_LinkedShaders[i]->SamplerTargets));
1118 }
1119
1120 /* Reserve all the explicit locations of the active uniforms. */
1121 for (unsigned i = 0; i < num_uniforms; i++) {
1122 if (uniforms[i].type->is_subroutine() ||
1123 uniforms[i].is_shader_storage)
1124 continue;
1125
1126 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
1127 /* How many new entries for this uniform? */
1128 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1129
1130 /* Set remap table entries point to correct gl_uniform_storage. */
1131 for (unsigned j = 0; j < entries; j++) {
1132 unsigned element_loc = uniforms[i].remap_location + j;
1133 assert(prog->UniformRemapTable[element_loc] ==
1134 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1135 prog->UniformRemapTable[element_loc] = &uniforms[i];
1136 }
1137 }
1138 }
1139
1140 /* Reserve locations for rest of the uniforms. */
1141 for (unsigned i = 0; i < num_uniforms; i++) {
1142
1143 if (uniforms[i].type->is_subroutine() ||
1144 uniforms[i].is_shader_storage)
1145 continue;
1146
1147 /* Built-in uniforms should not get any location. */
1148 if (uniforms[i].builtin)
1149 continue;
1150
1151 /* Explicit ones have been set already. */
1152 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
1153 continue;
1154
1155 /* how many new entries for this uniform? */
1156 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1157
1158 /* Find UniformRemapTable for empty blocks where we can fit this uniform. */
1159 int chosen_location = -1;
1160
1161 if (empty_locs)
1162 chosen_location = find_empty_block(prog, &uniforms[i]);
1163
1164 /* Add new entries to the total amount of entries. */
1165 total_entries += entries;
1166
1167 if (chosen_location != -1) {
1168 empty_locs -= entries;
1169 } else {
1170 chosen_location = prog->NumUniformRemapTable;
1171
1172 /* resize remap table to fit new entries */
1173 prog->UniformRemapTable =
1174 reralloc(prog,
1175 prog->UniformRemapTable,
1176 gl_uniform_storage *,
1177 prog->NumUniformRemapTable + entries);
1178 prog->NumUniformRemapTable += entries;
1179 }
1180
1181 /* set pointers for this uniform */
1182 for (unsigned j = 0; j < entries; j++)
1183 prog->UniformRemapTable[chosen_location + j] = &uniforms[i];
1184
1185 /* set the base location in remap table for the uniform */
1186 uniforms[i].remap_location = chosen_location;
1187 }
1188
1189 /* Verify that total amount of entries for explicit and implicit locations
1190 * is less than MAX_UNIFORM_LOCATIONS.
1191 */
1192
1193 if (total_entries > max_uniform_locs) {
1194 linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
1195 "(%u > %u)", total_entries, max_uniform_locs);
1196 }
1197
1198 /* Reserve all the explicit locations of the active subroutine uniforms. */
1199 for (unsigned i = 0; i < num_uniforms; i++) {
1200 if (!uniforms[i].type->is_subroutine())
1201 continue;
1202
1203 if (uniforms[i].remap_location == UNMAPPED_UNIFORM_LOC)
1204 continue;
1205
1206 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1207 struct gl_shader *sh = prog->_LinkedShaders[j];
1208 if (!sh)
1209 continue;
1210
1211 if (!uniforms[i].opaque[j].active)
1212 continue;
1213
1214 /* How many new entries for this uniform? */
1215 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1216
1217 /* Set remap table entries point to correct gl_uniform_storage. */
1218 for (unsigned k = 0; k < entries; k++) {
1219 unsigned element_loc = uniforms[i].remap_location + k;
1220 assert(sh->SubroutineUniformRemapTable[element_loc] ==
1221 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1222 sh->SubroutineUniformRemapTable[element_loc] = &uniforms[i];
1223 }
1224 }
1225 }
1226
1227 /* reserve subroutine locations */
1228 for (unsigned i = 0; i < num_uniforms; i++) {
1229
1230 if (!uniforms[i].type->is_subroutine())
1231 continue;
1232 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1233
1234 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
1235 continue;
1236 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1237 struct gl_shader *sh = prog->_LinkedShaders[j];
1238 if (!sh)
1239 continue;
1240
1241 if (!uniforms[i].opaque[j].active)
1242 continue;
1243
1244 sh->SubroutineUniformRemapTable =
1245 reralloc(sh,
1246 sh->SubroutineUniformRemapTable,
1247 gl_uniform_storage *,
1248 sh->NumSubroutineUniformRemapTable + entries);
1249
1250 for (unsigned k = 0; k < entries; k++)
1251 sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] = &uniforms[i];
1252 uniforms[i].remap_location = sh->NumSubroutineUniformRemapTable;
1253 sh->NumSubroutineUniformRemapTable += entries;
1254 }
1255 }
1256
1257 #ifndef NDEBUG
1258 for (unsigned i = 0; i < num_uniforms; i++) {
1259 assert(uniforms[i].storage != NULL || uniforms[i].builtin ||
1260 uniforms[i].is_shader_storage);
1261 }
1262
1263 assert(parcel.values == data_end);
1264 #endif
1265
1266 prog->NumUniformStorage = num_uniforms;
1267 prog->NumHiddenUniforms = hidden_uniforms;
1268 prog->UniformStorage = uniforms;
1269
1270 link_set_uniform_initializers(prog, boolean_true);
1271
1272 return;
1273 }