[mesa.git] / src / compiler / glsl / link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "main/core.h"
25 #include "ir.h"
26 #include "linker.h"
27 #include "ir_uniform.h"
28 #include "glsl_symbol_table.h"
29 #include "program.h"
30 #include "util/string_to_uint_map.h"
31
32 /**
33 * \file link_uniforms.cpp
34 * Assign locations for GLSL uniforms.
35 *
36 * \author Ian Romanick <ian.d.romanick@intel.com>
37 */
38
39 /**
40  * Used by the linker to indicate uniforms that have no location set.
41 */
42 #define UNMAPPED_UNIFORM_LOC ~0u
43
44 /**
45 * Count the backing storage requirements for a type
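 *
 * Editorial illustration (not from the original source): a lone sampler2D
 * counts as 1 slot and a sampler2D[8] as 8, while non-opaque types fall back
 * to component_slots() -- e.g. a vec3 needs 3 values and a mat4 needs 16.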
46 */
47 static unsigned
48 values_for_type(const glsl_type *type)
49 {
50 if (type->is_sampler()) {
51 return 1;
52 } else if (type->is_array() && type->fields.array->is_sampler()) {
53 return type->array_size();
54 } else {
55 return type->component_slots();
56 }
57 }
58
59 void
60 program_resource_visitor::process(const glsl_type *type, const char *name)
61 {
62 assert(type->without_array()->is_record()
63 || type->without_array()->is_interface());
64
65 unsigned record_array_count = 1;
66 char *name_copy = ralloc_strdup(NULL, name);
67 enum glsl_interface_packing packing = type->get_interface_packing();
68
69 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
70 record_array_count, NULL);
71 ralloc_free(name_copy);
72 }
73
74 void
75 program_resource_visitor::process(ir_variable *var)
76 {
77 unsigned record_array_count = 1;
78 const bool row_major =
79 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
80
81 const enum glsl_interface_packing packing = var->get_interface_type() ?
82 var->get_interface_type_packing() :
83 var->type->get_interface_packing();
84
85 const glsl_type *t =
86 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
87 const glsl_type *t_without_array = t->without_array();
88
89 /* false is always passed for the row_major parameter to the other
90 * processing functions because no information is available to do
91 * otherwise. See the warning in linker.h.
92 */
93 if (t_without_array->is_record() ||
94 (t->is_array() && t->fields.array->is_array())) {
95 char *name = ralloc_strdup(NULL, var->name);
96 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
97 false, record_array_count, NULL);
98 ralloc_free(name);
99 } else if (t_without_array->is_interface()) {
100 char *name = ralloc_strdup(NULL, t_without_array->name);
101 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
102 &t_without_array->
103 fields.structure[t_without_array->field_index(var->name)] : NULL;
104
105 recursion(t, &name, strlen(name), row_major, NULL, packing,
106 false, record_array_count, ifc_member);
107 ralloc_free(name);
108 } else {
109 this->set_record_array_count(record_array_count);
110 this->visit_field(t, var->name, row_major, NULL, packing, false);
111 }
112 }
113
114 void
115 program_resource_visitor::recursion(const glsl_type *t, char **name,
116 size_t name_length, bool row_major,
117 const glsl_type *record_type,
118 const enum glsl_interface_packing packing,
119 bool last_field,
120 unsigned record_array_count,
121 const glsl_struct_field *named_ifc_member)
122 {
123 /* Records need to have each field processed individually.
124 *
125 * Arrays of records need to have each array element processed
126 * individually, then each field of the resulting array elements processed
127 * individually.
128 */
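   /* Illustrative sketch (editorial addition, not in the original source):
    * for a hypothetical declaration
    *
    *    struct S { float f; vec4 v[2]; };
    *    uniform S s[3];
    *
    * the array branch below appends "[0]" ... "[2]" and the record branch
    * appends ".f" / ".v", so visit_field() ends up seeing the leaves
    * "s[0].f", "s[0].v", "s[1].f", "s[1].v", "s[2].f" and "s[2].v", with
    * record_array_count accumulating the 3 struct-array elements.  Arrays of
    * basic types such as "v" are not expanded per element here.
    */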
129 if (t->is_interface() && named_ifc_member) {
130 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
131 named_ifc_member->name);
132 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
133 packing, false, record_array_count, NULL);
134 } else if (t->is_record() || t->is_interface()) {
135 if (record_type == NULL && t->is_record())
136 record_type = t;
137
138 if (t->is_record())
139 this->enter_record(t, *name, row_major, packing);
140
141 for (unsigned i = 0; i < t->length; i++) {
142 const char *field = t->fields.structure[i].name;
143 size_t new_length = name_length;
144
145 if (t->fields.structure[i].type->is_record())
146 this->visit_field(&t->fields.structure[i]);
147
148 if (t->is_interface() && t->fields.structure[i].offset != -1)
149 this->set_buffer_offset(t->fields.structure[i].offset);
150
151 /* Append '.field' to the current variable name. */
152 if (name_length == 0) {
153 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
154 } else {
155 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
156 }
157
158 /* The layout of structures at the top level of the block is set
159 * during parsing. For matrices contained in multiple levels of
160 * structures in the block, the inner structures have no layout.
161 * These cases must potentially inherit the layout from the outer
162 * levels.
163 */
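         /* Illustrative case (editorial addition): in
          *    layout(row_major) uniform Block { struct { mat4 m; } s; };
          * the field "m" of the inner struct carries
          * GLSL_MATRIX_LAYOUT_INHERITED, so it falls through both checks
          * below and keeps the row_major value passed in from the enclosing
          * level.
          */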
164 bool field_row_major = row_major;
165 const enum glsl_matrix_layout matrix_layout =
166 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
167 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
168 field_row_major = true;
169 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
170 field_row_major = false;
171 }
172
173 recursion(t->fields.structure[i].type, name, new_length,
174 field_row_major,
175 record_type,
176 packing,
177 (i + 1) == t->length, record_array_count, NULL);
178
179 /* Only the first leaf-field of the record gets called with the
180 * record type pointer.
181 */
182 record_type = NULL;
183 }
184
185 if (t->is_record()) {
186 (*name)[name_length] = '\0';
187 this->leave_record(t, *name, row_major, packing);
188 }
189 } else if (t->without_array()->is_record() ||
190 t->without_array()->is_interface() ||
191 (t->is_array() && t->fields.array->is_array())) {
192 if (record_type == NULL && t->fields.array->is_record())
193 record_type = t->fields.array;
194
195 unsigned length = t->length;
196 /* Shader storage block unsized arrays: add subscript [0] to variable
197 * names */
198 if (t->is_unsized_array())
199 length = 1;
200
201 record_array_count *= length;
202
203 for (unsigned i = 0; i < length; i++) {
204 size_t new_length = name_length;
205
206 /* Append the subscript to the current variable name */
207 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
208
209 recursion(t->fields.array, name, new_length, row_major,
210 record_type,
211 packing,
212 (i + 1) == t->length, record_array_count,
213 named_ifc_member);
214
215 /* Only the first leaf-field of the record gets called with the
216 * record type pointer.
217 */
218 record_type = NULL;
219 }
220 } else {
221 this->set_record_array_count(record_array_count);
222 this->visit_field(t, *name, row_major, record_type, packing, last_field);
223 }
224 }
225
226 void
227 program_resource_visitor::visit_field(const glsl_type *type, const char *name,
228 bool row_major,
229 const glsl_type *,
230 const enum glsl_interface_packing,
231 bool /* last_field */)
232 {
233 visit_field(type, name, row_major);
234 }
235
236 void
237 program_resource_visitor::visit_field(const glsl_struct_field *field)
238 {
239 (void) field;
240 /* empty */
241 }
242
243 void
244 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
245 const enum glsl_interface_packing)
246 {
247 }
248
249 void
250 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
251 const enum glsl_interface_packing)
252 {
253 }
254
255 void
256 program_resource_visitor::set_buffer_offset(unsigned)
257 {
258 }
259
260 void
261 program_resource_visitor::set_record_array_count(unsigned)
262 {
263 }
264
265 namespace {
266
267 /**
268 * Class to help calculate the storage requirements for a set of uniforms
269 *
270 * As uniforms are added to the active set the number of active uniforms and
271 * the storage requirements for those uniforms are accumulated. The active
272 * uniforms are added to the hash table supplied to the constructor.
273 *
274  * If the same uniform is added multiple times (i.e., once for each shader
275  * target), it will be counted only once.
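 *
 * Editorial illustration: a uniform referenced from both the vertex and
 * fragment shaders still contributes to num_shader_uniform_components of
 * each stage (counted before the hash-table check), but it is added to
 * num_active_uniforms only once.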
276 */
277 class count_uniform_size : public program_resource_visitor {
278 public:
279 count_uniform_size(struct string_to_uint_map *map,
280 struct string_to_uint_map *hidden_map)
281 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
282 num_shader_samplers(0), num_shader_images(0),
283 num_shader_uniform_components(0), num_shader_subroutines(0),
284 is_buffer_block(false), is_shader_storage(false), map(map),
285 hidden_map(hidden_map)
286 {
287 /* empty */
288 }
289
290 void start_shader()
291 {
292 this->num_shader_samplers = 0;
293 this->num_shader_images = 0;
294 this->num_shader_uniform_components = 0;
295 this->num_shader_subroutines = 0;
296 }
297
298 void process(ir_variable *var)
299 {
300 this->current_var = var;
301 this->is_buffer_block = var->is_in_buffer_block();
302 this->is_shader_storage = var->is_in_shader_storage_block();
303 if (var->is_interface_instance())
304 program_resource_visitor::process(var->get_interface_type(),
305 var->get_interface_type()->name);
306 else
307 program_resource_visitor::process(var);
308 }
309
310 /**
311 * Total number of active uniforms counted
312 */
313 unsigned num_active_uniforms;
314
315 unsigned num_hidden_uniforms;
316
317 /**
318 * Number of data values required to back the storage for the active uniforms
319 */
320 unsigned num_values;
321
322 /**
323 * Number of samplers used
324 */
325 unsigned num_shader_samplers;
326
327 /**
328 * Number of images used
329 */
330 unsigned num_shader_images;
331
332 /**
333 * Number of uniforms used in the current shader
334 */
335 unsigned num_shader_uniform_components;
336
337 /**
338 * Number of subroutine uniforms used
339 */
340 unsigned num_shader_subroutines;
341
342 bool is_buffer_block;
343 bool is_shader_storage;
344
345 struct string_to_uint_map *map;
346
347 private:
348 virtual void visit_field(const glsl_type *type, const char *name,
349 bool row_major)
350 {
351 assert(!type->without_array()->is_record());
352 assert(!type->without_array()->is_interface());
353 assert(!(type->is_array() && type->fields.array->is_array()));
354
355 (void) row_major;
356
357 /* Count the number of samplers regardless of whether the uniform is
358 * already in the hash table. The hash table prevents adding the same
359 * uniform for multiple shader targets, but in this case we want to
360 * count it for each shader target.
361 */
362 const unsigned values = values_for_type(type);
363 if (type->contains_subroutine()) {
364 this->num_shader_subroutines += values;
365 } else if (type->contains_sampler()) {
366 this->num_shader_samplers += values;
367 } else if (type->contains_image()) {
368 this->num_shader_images += values;
369
370 /* As drivers are likely to represent image uniforms as
371 * scalar indices, count them against the limit of uniform
372 * components in the default block. The spec allows image
373 * uniforms to use up no more than one scalar slot.
374 */
375 if(!is_shader_storage)
376 this->num_shader_uniform_components += values;
377 } else {
378 /* Accumulate the total number of uniform slots used by this shader.
379 * Note that samplers do not count against this limit because they
380 * don't use any storage on current hardware.
381 */
382 if (!is_buffer_block)
383 this->num_shader_uniform_components += values;
384 }
385
386 /* If the uniform is already in the map, there's nothing more to do.
387 */
388 unsigned id;
389 if (this->map->get(id, name))
390 return;
391
392 if (this->current_var->data.how_declared == ir_var_hidden) {
393 this->hidden_map->put(this->num_hidden_uniforms, name);
394 this->num_hidden_uniforms++;
395 } else {
396 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
397 name);
398 }
399
400 /* Each leaf uniform occupies one entry in the list of active
401 * uniforms.
402 */
403 this->num_active_uniforms++;
404
405 if(!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
406 this->num_values += values;
407 }
408
409 struct string_to_uint_map *hidden_map;
410
411 /**
412 * Current variable being processed.
413 */
414 ir_variable *current_var;
415 };
416
417 } /* anonymous namespace */
418
419 /**
420 * Class to help parcel out pieces of backing storage to uniforms
421 *
422 * Each uniform processed has some range of the \c gl_constant_value
423 * structures associated with it. The association is done by finding
424 * the uniform in the \c string_to_uint_map and using the value from
425 * the map to connect that slot in the \c gl_uniform_storage table
426 * with the next available slot in the \c gl_constant_value array.
427 *
428 * \warning
429 * This class assumes that every uniform that will be processed is
430 * already in the \c string_to_uint_map. In addition, it assumes that
431 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
432 * enough."
433 */
434 class parcel_out_uniform_storage : public program_resource_visitor {
435 public:
436 parcel_out_uniform_storage(struct gl_shader_program *prog,
437 struct string_to_uint_map *map,
438 struct gl_uniform_storage *uniforms,
439 union gl_constant_value *values)
440 : prog(prog), map(map), uniforms(uniforms), values(values)
441 {
442 }
443
444 void start_shader(gl_shader_stage shader_type)
445 {
446 assert(shader_type < MESA_SHADER_STAGES);
447 this->shader_type = shader_type;
448
449 this->shader_samplers_used = 0;
450 this->shader_shadow_samplers = 0;
451 this->next_sampler = 0;
452 this->next_image = 0;
453 this->next_subroutine = 0;
454 this->record_array_count = 1;
455 memset(this->targets, 0, sizeof(this->targets));
456 }
457
458 void set_and_process(ir_variable *var)
459 {
460 current_var = var;
461 field_counter = 0;
462 this->record_next_sampler = new string_to_uint_map;
463
464 buffer_block_index = -1;
465 if (var->is_in_buffer_block()) {
466 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
467 prog->ShaderStorageBlocks : prog->UniformBlocks;
468 unsigned num_blks = var->is_in_shader_storage_block() ?
469 prog->NumShaderStorageBlocks : prog->NumUniformBlocks;
470
471 if (var->is_interface_instance() && var->type->is_array()) {
472 unsigned l = strlen(var->get_interface_type()->name);
473
474 for (unsigned i = 0; i < num_blks; i++) {
475 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
476 == 0 && blks[i].Name[l] == '[') {
477 buffer_block_index = i;
478 break;
479 }
480 }
481 } else {
482 for (unsigned i = 0; i < num_blks; i++) {
483 if (strcmp(var->get_interface_type()->name, blks[i].Name) ==
484 0) {
485 buffer_block_index = i;
486 break;
487 }
488 }
489 }
490 assert(buffer_block_index != -1);
491
492 /* Uniform blocks that were specified with an instance name must be
493 * handled a little bit differently. The name of the variable is the
494 * name used to reference the uniform block instead of being the name
495 * of a variable within the block. Therefore, searching for the name
496 * within the block will fail.
497 */
498 if (var->is_interface_instance()) {
499 ubo_byte_offset = 0;
500 process(var->get_interface_type(),
501 var->get_interface_type()->name);
502 } else {
503 const struct gl_uniform_block *const block =
504 &blks[buffer_block_index];
505
506 assert(var->data.location != -1);
507
508 const struct gl_uniform_buffer_variable *const ubo_var =
509 &block->Uniforms[var->data.location];
510
511 ubo_byte_offset = ubo_var->Offset;
512 process(var);
513 }
514 } else {
515 /* Store any explicit location and reset data location so we can
516 * reuse this variable for storing the uniform slot number.
517 */
518 this->explicit_location = current_var->data.location;
519 current_var->data.location = -1;
520
521 process(var);
522 }
523 delete this->record_next_sampler;
524 }
525
526 int buffer_block_index;
527 int ubo_byte_offset;
528 gl_shader_stage shader_type;
529
530 private:
531 void handle_samplers(const glsl_type *base_type,
532 struct gl_uniform_storage *uniform, const char *name)
533 {
534 if (base_type->is_sampler()) {
535 uniform->opaque[shader_type].active = true;
536
537 /* Handle multiple samplers inside struct arrays */
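         /* Illustrative sketch (editorial addition): for a hypothetical
          *    uniform struct { sampler2D tex; } s[2][3];
          * record_array_count is 6, so the first leaf visited ("s[0][0].tex")
          * reserves sampler indices 0..5 in one step, and the remaining five
          * leaves pick up consecutive indices from record_next_sampler, keyed
          * by the subscript-stripped name "s.tex".
          */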
538 if (this->record_array_count > 1) {
539 unsigned inner_array_size = MAX2(1, uniform->array_elements);
540 char *name_copy = ralloc_strdup(NULL, name);
541
542 /* Remove all array subscripts from the sampler name */
543 char *str_start;
544 const char *str_end;
545 while((str_start = strchr(name_copy, '[')) &&
546 (str_end = strchr(name_copy, ']'))) {
547 memmove(str_start, str_end + 1, 1 + strlen(str_end));
548 }
549
550 unsigned index = 0;
551 if (this->record_next_sampler->get(index, name_copy)) {
552 /* In this case, we've already seen this uniform so we just use
553 * the next sampler index recorded the last time we visited.
554 */
555 uniform->opaque[shader_type].index = index;
556 index = inner_array_size + uniform->opaque[shader_type].index;
557 this->record_next_sampler->put(index, name_copy);
558
559 ralloc_free(name_copy);
560 /* Return as everything else has already been initialised in a
561 * previous pass.
562 */
563 return;
564 } else {
565 /* We've never seen this uniform before so we need to allocate
566 * enough indices to store it.
567 *
568 * Nested struct arrays behave like arrays of arrays so we need
569 * to increase the index by the total number of elements of the
570 * sampler in case there is more than one sampler inside the
571 * structs. This allows the offset to be easily calculated for
572 * indirect indexing.
573 */
574 uniform->opaque[shader_type].index = this->next_sampler;
575 this->next_sampler +=
576 inner_array_size * this->record_array_count;
577
578 /* Store the next index for future passes over the struct array
579 */
580 index = uniform->opaque[shader_type].index + inner_array_size;
581 this->record_next_sampler->put(index, name_copy);
582 ralloc_free(name_copy);
583 }
584 } else {
585 /* Increment the sampler by 1 for non-arrays and by the number of
586 * array elements for arrays.
587 */
588 uniform->opaque[shader_type].index = this->next_sampler;
589 this->next_sampler += MAX2(1, uniform->array_elements);
590 }
591
592 const gl_texture_index target = base_type->sampler_index();
593 const unsigned shadow = base_type->sampler_shadow;
594 for (unsigned i = uniform->opaque[shader_type].index;
595 i < MIN2(this->next_sampler, MAX_SAMPLERS);
596 i++) {
597 this->targets[i] = target;
598 this->shader_samplers_used |= 1U << i;
599 this->shader_shadow_samplers |= shadow << i;
600 }
601 }
602 }
603
604 void handle_images(const glsl_type *base_type,
605 struct gl_uniform_storage *uniform)
606 {
607 if (base_type->is_image()) {
608 uniform->opaque[shader_type].index = this->next_image;
609 uniform->opaque[shader_type].active = true;
610
611 /* Set image access qualifiers */
612 const GLenum access =
613 (current_var->data.image_read_only ? GL_READ_ONLY :
614 current_var->data.image_write_only ? GL_WRITE_ONLY :
615 GL_READ_WRITE);
616
617 const unsigned first = this->next_image;
618
619 /* Increment the image index by 1 for non-arrays and by the
620 * number of array elements for arrays.
621 */
622 this->next_image += MAX2(1, uniform->array_elements);
623
624 for (unsigned i = first; i < MIN2(next_image, MAX_IMAGE_UNIFORMS); i++)
625 prog->_LinkedShaders[shader_type]->ImageAccess[i] = access;
626 }
627 }
628
629 void handle_subroutines(const glsl_type *base_type,
630 struct gl_uniform_storage *uniform)
631 {
632 if (base_type->is_subroutine()) {
633 uniform->opaque[shader_type].index = this->next_subroutine;
634 uniform->opaque[shader_type].active = true;
635
636 /* Increment the subroutine index by 1 for non-arrays and by the
637 * number of array elements for arrays.
638 */
639 this->next_subroutine += MAX2(1, uniform->array_elements);
640
641 }
642 }
643
644 virtual void set_buffer_offset(unsigned offset)
645 {
646 this->ubo_byte_offset = offset;
647 }
648
649 virtual void set_record_array_count(unsigned record_array_count)
650 {
651 this->record_array_count = record_array_count;
652 }
653
654 virtual void visit_field(const glsl_type *type, const char *name,
655 bool row_major)
656 {
657 (void) type;
658 (void) name;
659 (void) row_major;
660 assert(!"Should not get here.");
661 }
662
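   /* Editorial note (illustrative): entering and leaving a record rounds the
    * running offset up to the record's base alignment.  Under std140 a struct
    * containing a single float is still aligned to 16 bytes (vec4 rounding),
    * while std430 drops that rounding and aligns it to 4.
    */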
663 virtual void enter_record(const glsl_type *type, const char *,
664 bool row_major, const enum glsl_interface_packing packing) {
665 assert(type->is_record());
666 if (this->buffer_block_index == -1)
667 return;
668 if (packing == GLSL_INTERFACE_PACKING_STD430)
669 this->ubo_byte_offset = glsl_align(
670 this->ubo_byte_offset, type->std430_base_alignment(row_major));
671 else
672 this->ubo_byte_offset = glsl_align(
673 this->ubo_byte_offset, type->std140_base_alignment(row_major));
674 }
675
676 virtual void leave_record(const glsl_type *type, const char *,
677 bool row_major, const enum glsl_interface_packing packing) {
678 assert(type->is_record());
679 if (this->buffer_block_index == -1)
680 return;
681 if (packing == GLSL_INTERFACE_PACKING_STD430)
682 this->ubo_byte_offset = glsl_align(
683 this->ubo_byte_offset, type->std430_base_alignment(row_major));
684 else
685 this->ubo_byte_offset = glsl_align(
686 this->ubo_byte_offset, type->std140_base_alignment(row_major));
687 }
688
689 virtual void visit_field(const glsl_type *type, const char *name,
690 bool row_major, const glsl_type * /* record_type */,
691 const enum glsl_interface_packing packing,
692 bool /* last_field */)
693 {
694 assert(!type->without_array()->is_record());
695 assert(!type->without_array()->is_interface());
696 assert(!(type->is_array() && type->fields.array->is_array()));
697
698 unsigned id;
699 bool found = this->map->get(id, name);
700 assert(found);
701
702 if (!found)
703 return;
704
705 const glsl_type *base_type;
706 if (type->is_array()) {
707 this->uniforms[id].array_elements = type->length;
708 base_type = type->fields.array;
709 } else {
710 this->uniforms[id].array_elements = 0;
711 base_type = type;
712 }
713
714 /* Initialise opaque data */
715 this->uniforms[id].opaque[shader_type].index = ~0;
716 this->uniforms[id].opaque[shader_type].active = false;
717
718 /* This assigns uniform indices to sampler and image uniforms. */
719 handle_samplers(base_type, &this->uniforms[id], name);
720 handle_images(base_type, &this->uniforms[id]);
721 handle_subroutines(base_type, &this->uniforms[id]);
722
723      /* For arrays of arrays or struct arrays, the base location may have
724       * already been set, so don't set it again.
725 */
726 if (buffer_block_index == -1 && current_var->data.location == -1) {
727 current_var->data.location = id;
728 }
729
730 /* If there is already storage associated with this uniform or if the
731 * uniform is set as builtin, it means that it was set while processing
732 * an earlier shader stage. For example, we may be processing the
733 * uniform in the fragment shader, but the uniform was already processed
734 * in the vertex shader.
735 */
736 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
737 return;
738 }
739
740 /* Assign explicit locations. */
741 if (current_var->data.explicit_location) {
742 /* Set sequential locations for struct fields. */
743 if (current_var->type->without_array()->is_record() ||
744 current_var->type->is_array_of_arrays()) {
745 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
746 this->uniforms[id].remap_location =
747 this->explicit_location + field_counter;
748 field_counter += entries;
749 } else {
750 this->uniforms[id].remap_location = this->explicit_location;
751 }
752 } else {
753         /* Initialize to UNMAPPED_UNIFORM_LOC to indicate no location is set. */
754 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
755 }
756
757 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
758 this->uniforms[id].type = base_type;
759 this->uniforms[id].num_driver_storage = 0;
760 this->uniforms[id].driver_storage = NULL;
761 this->uniforms[id].atomic_buffer_index = -1;
762 this->uniforms[id].hidden =
763 current_var->data.how_declared == ir_var_hidden;
764 this->uniforms[id].builtin = is_gl_identifier(name);
765
766 this->uniforms[id].is_shader_storage =
767 current_var->is_in_shader_storage_block();
768
769 /* Do not assign storage if the uniform is a builtin or buffer object */
770 if (!this->uniforms[id].builtin &&
771 !this->uniforms[id].is_shader_storage &&
772 this->buffer_block_index == -1)
773 this->uniforms[id].storage = this->values;
774
775 if (this->buffer_block_index != -1) {
776 this->uniforms[id].block_index = this->buffer_block_index;
777
778 unsigned alignment = type->std140_base_alignment(row_major);
779 if (packing == GLSL_INTERFACE_PACKING_STD430)
780 alignment = type->std430_base_alignment(row_major);
781 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
782 this->uniforms[id].offset = this->ubo_byte_offset;
783 if (packing == GLSL_INTERFACE_PACKING_STD430)
784 this->ubo_byte_offset += type->std430_size(row_major);
785 else
786 this->ubo_byte_offset += type->std140_size(row_major);
787
788 if (type->is_array()) {
789 if (packing == GLSL_INTERFACE_PACKING_STD430)
790 this->uniforms[id].array_stride =
791 type->without_array()->std430_array_stride(row_major);
792 else
793 this->uniforms[id].array_stride =
794 glsl_align(type->without_array()->std140_size(row_major),
795 16);
796 } else {
797 this->uniforms[id].array_stride = 0;
798 }
799
800 if (type->without_array()->is_matrix()) {
801 const glsl_type *matrix = type->without_array();
802 const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4;
803 const unsigned items =
804 row_major ? matrix->matrix_columns : matrix->vector_elements;
805
806 assert(items <= 4);
807 if (packing == GLSL_INTERFACE_PACKING_STD430)
808 this->uniforms[id].matrix_stride = items < 3 ? items * N :
809 glsl_align(items * N, 16);
810 else
811 this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
812 this->uniforms[id].row_major = row_major;
813 } else {
814 this->uniforms[id].matrix_stride = 0;
815 this->uniforms[id].row_major = false;
816 }
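         /* Worked example (editorial, illustrative only): for a hypothetical
          * block member "float a[4]" the std140 array_stride is rounded up to
          * a vec4 (16 bytes) while std430 packs it with a 4-byte stride; a
          * column-major mat3 gets a matrix_stride of 16 under both layouts
          * because each 3-float column is padded to a vec4 boundary.
          */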
817 } else {
818 this->uniforms[id].block_index = -1;
819 this->uniforms[id].offset = -1;
820 this->uniforms[id].array_stride = -1;
821 this->uniforms[id].matrix_stride = -1;
822 this->uniforms[id].row_major = false;
823 }
824
825 if (!this->uniforms[id].builtin &&
826 !this->uniforms[id].is_shader_storage &&
827 this->buffer_block_index == -1)
828 this->values += values_for_type(type);
829 }
830
831 /**
832 * Current program being processed.
833 */
834 struct gl_shader_program *prog;
835
836 struct string_to_uint_map *map;
837
838 struct gl_uniform_storage *uniforms;
839 unsigned next_sampler;
840 unsigned next_image;
841 unsigned next_subroutine;
842
843 /**
844 * Field counter is used to take care that uniform structures
845 * with explicit locations get sequential locations.
846 */
847 unsigned field_counter;
848
849 /**
850 * Current variable being processed.
851 */
852 ir_variable *current_var;
853
854 /* Used to store the explicit location from current_var so that we can
855 * reuse the location field for storing the uniform slot id.
856 */
857 int explicit_location;
858
859 /* Stores total struct array elements including nested structs */
860 unsigned record_array_count;
861
862 /* Map for temporarily storing next sampler index when handling samplers in
863 * struct arrays.
864 */
865 struct string_to_uint_map *record_next_sampler;
866
867 public:
868 union gl_constant_value *values;
869
870 gl_texture_index targets[MAX_SAMPLERS];
871
872 /**
873 * Mask of samplers used by the current shader stage.
874 */
875 unsigned shader_samplers_used;
876
877 /**
878 * Mask of samplers used by the current shader stage for shadows.
879 */
880 unsigned shader_shadow_samplers;
881 };
882
883 /**
884  * Walks the IR and updates the references to uniform blocks in the
885  * ir_variables to point at the linked shader's list (previously, they
886 * would point at the uniform block list in one of the pre-linked
887 * shaders).
888 */
889 static void
890 link_update_uniform_buffer_variables(struct gl_linked_shader *shader)
891 {
892 foreach_in_list(ir_instruction, node, shader->ir) {
893 ir_variable *const var = node->as_variable();
894
895 if ((var == NULL) || !var->is_in_buffer_block())
896 continue;
897
898 assert(var->data.mode == ir_var_uniform ||
899 var->data.mode == ir_var_shader_storage);
900
901 if (var->is_interface_instance()) {
902 var->data.location = 0;
903 continue;
904 }
905
906 bool found = false;
907 char sentinel = '\0';
908
909 if (var->type->is_record()) {
910 sentinel = '.';
911 } else if (var->type->is_array() && (var->type->fields.array->is_array()
912 || var->type->without_array()->is_record())) {
913 sentinel = '[';
914 }
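      /* Illustrative note (editorial): a member declared as
       *    struct S { float f; } s;
       * shows up in the block's uniform list as "s.f", so the match below
       * stops at the '.' sentinel; for an array of structs "S s[2]" the list
       * holds names like "s[0].f", hence the '[' sentinel.
       */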
915
916 unsigned num_blocks = var->data.mode == ir_var_uniform ?
917 shader->NumUniformBlocks : shader->NumShaderStorageBlocks;
918 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
919 shader->UniformBlocks : shader->ShaderStorageBlocks;
920
921 const unsigned l = strlen(var->name);
922 for (unsigned i = 0; i < num_blocks; i++) {
923 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
924 if (sentinel) {
925 const char *begin = blks[i]->Uniforms[j].Name;
926 const char *end = strchr(begin, sentinel);
927
928 if (end == NULL)
929 continue;
930
931 if ((ptrdiff_t) l != (end - begin))
932 continue;
933
934 if (strncmp(var->name, begin, l) == 0) {
935 found = true;
936 var->data.location = j;
937 break;
938 }
939 } else if (!strcmp(var->name, blks[i]->Uniforms[j].Name)) {
940 found = true;
941 var->data.location = j;
942 break;
943 }
944 }
945 if (found)
946 break;
947 }
948 assert(found);
949 }
950 }
951
952 /**
953 * Combine the hidden uniform hash map with the uniform hash map so that the
954  * hidden uniforms will be given indices at the end of the uniform storage
955 * array.
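 *
 * Editorial illustration: with 10 active uniforms of which 2 are hidden, the
 * visible uniforms keep indices 0..7 and the hidden ones are remapped to 8
 * and 9.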
956 */
957 static void
958 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
959 void *closure)
960 {
961 count_uniform_size *uniform_size = (count_uniform_size *) closure;
962 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
963 uniform_size->num_hidden_uniforms;
964
965 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
966 }
967
968 /**
969 * Search through the list of empty blocks to find one that fits the current
970 * uniform.
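 *
 * Editorial illustration: an empty block of 4 slots either disappears when a
 * uniform needing exactly 4 locations claims it, or shrinks to 2 slots when a
 * uniform needing 2 locations takes its front.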
971 */
972 static int
973 find_empty_block(struct gl_shader_program *prog,
974 struct gl_uniform_storage *uniform)
975 {
976 const unsigned entries = MAX2(1, uniform->array_elements);
977
978 foreach_list_typed(struct empty_uniform_block, block, link,
979 &prog->EmptyUniformLocations) {
980 /* Found a block with enough slots to fit the uniform */
981 if (block->slots == entries) {
982 unsigned start = block->start;
983 exec_node_remove(&block->link);
984 ralloc_free(block);
985
986 return start;
987 /* Found a block with more slots than needed. It can still be used. */
988 } else if (block->slots > entries) {
989 unsigned start = block->start;
990 block->start += entries;
991 block->slots -= entries;
992
993 return start;
994 }
995 }
996
997 return -1;
998 }
999
1000 void
1001 link_assign_uniform_locations(struct gl_shader_program *prog,
1002 unsigned int boolean_true,
1003 unsigned int num_explicit_uniform_locs,
1004 unsigned int max_uniform_locs)
1005 {
1006 ralloc_free(prog->UniformStorage);
1007 prog->UniformStorage = NULL;
1008 prog->NumUniformStorage = 0;
1009
1010 if (prog->UniformHash != NULL) {
1011 prog->UniformHash->clear();
1012 } else {
1013 prog->UniformHash = new string_to_uint_map;
1014 }
1015
1016 /* First pass: Count the uniform resources used by the user-defined
1017 * uniforms. While this happens, each active uniform will have an index
1018 * assigned to it.
1019 *
1020 * Note: this is *NOT* the index that is returned to the application by
1021 * glGetUniformLocation.
1022 */
1023 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1024 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms);
1025 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1026 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1027
1028 if (sh == NULL)
1029 continue;
1030
1031 /* Uniforms that lack an initializer in the shader code have an initial
1032 * value of zero. This includes sampler uniforms.
1033 *
1034 * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
1035 *
1036 * "The link time initial value is either the value of the variable's
1037 * initializer, if present, or 0 if no initializer is present. Sampler
1038 * types cannot have initializers."
1039 */
1040 memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
1041 memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));
1042
1043 link_update_uniform_buffer_variables(sh);
1044
1045 /* Reset various per-shader target counts.
1046 */
1047 uniform_size.start_shader();
1048
1049 foreach_in_list(ir_instruction, node, sh->ir) {
1050 ir_variable *const var = node->as_variable();
1051
1052 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1053 var->data.mode != ir_var_shader_storage))
1054 continue;
1055
1056 uniform_size.process(var);
1057 }
1058
1059 sh->num_samplers = uniform_size.num_shader_samplers;
1060 sh->NumImages = uniform_size.num_shader_images;
1061 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1062 sh->num_combined_uniform_components = sh->num_uniform_components;
1063
1064 for (unsigned i = 0; i < sh->NumUniformBlocks; i++) {
1065 sh->num_combined_uniform_components +=
1066 sh->UniformBlocks[i]->UniformBufferSize / 4;
1067 }
1068 }
1069
1070 const unsigned num_uniforms = uniform_size.num_active_uniforms;
1071 const unsigned num_data_slots = uniform_size.num_values;
1072 const unsigned hidden_uniforms = uniform_size.num_hidden_uniforms;
1073
1074 /* assign hidden uniforms a slot id */
1075 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1076 delete hiddenUniforms;
1077
1078 /* On the outside chance that there were no uniforms, bail out.
1079 */
1080 if (num_uniforms == 0)
1081 return;
1082
1083 struct gl_uniform_storage *uniforms =
1084 rzalloc_array(prog, struct gl_uniform_storage, num_uniforms);
1085 union gl_constant_value *data =
1086 rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
1087 #ifndef NDEBUG
1088 union gl_constant_value *data_end = &data[num_data_slots];
1089 #endif
1090
1091 parcel_out_uniform_storage parcel(prog, prog->UniformHash, uniforms, data);
1092
1093 unsigned total_entries = num_explicit_uniform_locs;
1094 unsigned empty_locs = prog->NumUniformRemapTable - num_explicit_uniform_locs;
1095
1096 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1097 if (prog->_LinkedShaders[i] == NULL)
1098 continue;
1099
1100 parcel.start_shader((gl_shader_stage)i);
1101
1102 foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
1103 ir_variable *const var = node->as_variable();
1104
1105 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1106 var->data.mode != ir_var_shader_storage))
1107 continue;
1108
1109 parcel.set_and_process(var);
1110 }
1111
1112 prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
1113 prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;
1114
1115 STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
1116 sizeof(parcel.targets));
1117 memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
1118 sizeof(prog->_LinkedShaders[i]->SamplerTargets));
1119 }
1120
1121 /* Reserve all the explicit locations of the active uniforms. */
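   /* Illustrative sketch (editorial addition): a hypothetical
    *    layout(location = 7) uniform vec4 v[3];
    * has remap_location == 7 and entries == 3, so UniformRemapTable[7..9]
    * all end up pointing at the same gl_uniform_storage record.
    */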
1122 for (unsigned i = 0; i < num_uniforms; i++) {
1123 if (uniforms[i].type->is_subroutine() ||
1124 uniforms[i].is_shader_storage)
1125 continue;
1126
1127 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
1128 /* How many new entries for this uniform? */
1129 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1130
1131          /* Set remap table entries to point to the correct gl_uniform_storage. */
1132 for (unsigned j = 0; j < entries; j++) {
1133 unsigned element_loc = uniforms[i].remap_location + j;
1134 assert(prog->UniformRemapTable[element_loc] ==
1135 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1136 prog->UniformRemapTable[element_loc] = &uniforms[i];
1137 }
1138 }
1139 }
1140
1141 /* Reserve locations for rest of the uniforms. */
1142 for (unsigned i = 0; i < num_uniforms; i++) {
1143
1144 if (uniforms[i].type->is_subroutine() ||
1145 uniforms[i].is_shader_storage)
1146 continue;
1147
1148 /* Built-in uniforms should not get any location. */
1149 if (uniforms[i].builtin)
1150 continue;
1151
1152 /* Explicit ones have been set already. */
1153 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
1154 continue;
1155
1156 /* how many new entries for this uniform? */
1157 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1158
1159       /* Look for an empty block in the UniformRemapTable that can fit this uniform. */
1160 int chosen_location = -1;
1161
1162 if (empty_locs)
1163 chosen_location = find_empty_block(prog, &uniforms[i]);
1164
1165 /* Add new entries to the total amount of entries. */
1166 total_entries += entries;
1167
1168 if (chosen_location != -1) {
1169 empty_locs -= entries;
1170 } else {
1171 chosen_location = prog->NumUniformRemapTable;
1172
1173 /* resize remap table to fit new entries */
1174 prog->UniformRemapTable =
1175 reralloc(prog,
1176 prog->UniformRemapTable,
1177 gl_uniform_storage *,
1178 prog->NumUniformRemapTable + entries);
1179 prog->NumUniformRemapTable += entries;
1180 }
1181
1182 /* set pointers for this uniform */
1183 for (unsigned j = 0; j < entries; j++)
1184 prog->UniformRemapTable[chosen_location + j] = &uniforms[i];
1185
1186 /* set the base location in remap table for the uniform */
1187 uniforms[i].remap_location = chosen_location;
1188 }
1189
1190    /* Verify that the total number of entries for explicit and implicit
1191     * locations does not exceed MAX_UNIFORM_LOCATIONS.
1192 */
1193
1194 if (total_entries > max_uniform_locs) {
1195       linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
1196                    " (%u > %u)", total_entries, max_uniform_locs);
1197 }
1198
1199 /* Reserve all the explicit locations of the active subroutine uniforms. */
1200 for (unsigned i = 0; i < num_uniforms; i++) {
1201 if (!uniforms[i].type->is_subroutine())
1202 continue;
1203
1204 if (uniforms[i].remap_location == UNMAPPED_UNIFORM_LOC)
1205 continue;
1206
1207 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1208 struct gl_linked_shader *sh = prog->_LinkedShaders[j];
1209 if (!sh)
1210 continue;
1211
1212 if (!uniforms[i].opaque[j].active)
1213 continue;
1214
1215 /* How many new entries for this uniform? */
1216 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1217
1218          /* Set remap table entries to point to the correct gl_uniform_storage. */
1219 for (unsigned k = 0; k < entries; k++) {
1220 unsigned element_loc = uniforms[i].remap_location + k;
1221 assert(sh->SubroutineUniformRemapTable[element_loc] ==
1222 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1223 sh->SubroutineUniformRemapTable[element_loc] = &uniforms[i];
1224 }
1225 }
1226 }
1227
1228 /* reserve subroutine locations */
1229 for (unsigned i = 0; i < num_uniforms; i++) {
1230
1231 if (!uniforms[i].type->is_subroutine())
1232 continue;
1233 const unsigned entries = MAX2(1, uniforms[i].array_elements);
1234
1235 if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
1236 continue;
1237 for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
1238 struct gl_linked_shader *sh = prog->_LinkedShaders[j];
1239 if (!sh)
1240 continue;
1241
1242 if (!uniforms[i].opaque[j].active)
1243 continue;
1244
1245 sh->SubroutineUniformRemapTable =
1246 reralloc(sh,
1247 sh->SubroutineUniformRemapTable,
1248 gl_uniform_storage *,
1249 sh->NumSubroutineUniformRemapTable + entries);
1250
1251 for (unsigned k = 0; k < entries; k++)
1252 sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] = &uniforms[i];
1253 uniforms[i].remap_location = sh->NumSubroutineUniformRemapTable;
1254 sh->NumSubroutineUniformRemapTable += entries;
1255 }
1256 }
1257
1258 #ifndef NDEBUG
1259 for (unsigned i = 0; i < num_uniforms; i++) {
1260 assert(uniforms[i].storage != NULL || uniforms[i].builtin ||
1261 uniforms[i].is_shader_storage ||
1262 uniforms[i].block_index != -1);
1263 }
1264
1265 assert(parcel.values == data_end);
1266 #endif
1267
1268 prog->NumUniformStorage = num_uniforms;
1269 prog->NumHiddenUniforms = hidden_uniforms;
1270 prog->UniformStorage = uniforms;
1271
1272 link_set_uniform_initializers(prog, boolean_true);
1273
1274 return;
1275 }