src/compiler/glsl/link_uniforms.cpp
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "ir.h"
25 #include "linker.h"
26 #include "ir_uniform.h"
27 #include "glsl_symbol_table.h"
28 #include "program.h"
29 #include "string_to_uint_map.h"
30 #include "ir_array_refcount.h"
31 #include "main/mtypes.h"
32
33 /**
34 * \file link_uniforms.cpp
35 * Assign locations for GLSL uniforms.
36 *
37 * \author Ian Romanick <ian.d.romanick@intel.com>
38 */
39
40 /**
41 * Used by linker to indicate uniforms that have no location set.
42 */
43 #define UNMAPPED_UNIFORM_LOC ~0u
44
45 void
46 program_resource_visitor::process(const glsl_type *type, const char *name,
47 bool use_std430_as_default)
48 {
49 assert(type->without_array()->is_struct()
50 || type->without_array()->is_interface());
51
52 unsigned record_array_count = 1;
53 char *name_copy = ralloc_strdup(NULL, name);
54
55 enum glsl_interface_packing packing =
56 type->get_internal_ifc_packing(use_std430_as_default);
57
58 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
59 record_array_count, NULL);
60 ralloc_free(name_copy);
61 }
62
63 void
64 program_resource_visitor::process(ir_variable *var, bool use_std430_as_default)
65 {
66 const glsl_type *t =
67 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
68 process(var, t, use_std430_as_default);
69 }
70
71 void
72 program_resource_visitor::process(ir_variable *var, const glsl_type *var_type,
73 bool use_std430_as_default)
74 {
75 unsigned record_array_count = 1;
76 const bool row_major =
77 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
78
79 enum glsl_interface_packing packing = var->get_interface_type() ?
80 var->get_interface_type()->
81 get_internal_ifc_packing(use_std430_as_default) :
82 var->type->get_internal_ifc_packing(use_std430_as_default);
83
84 const glsl_type *t = var_type;
85 const glsl_type *t_without_array = t->without_array();
86
87 /* false is always passed for the row_major parameter to the other
88 * processing functions because no information is available to do
89 * otherwise. See the warning in linker.h.
90 */
91 if (t_without_array->is_struct() ||
92 (t->is_array() && t->fields.array->is_array())) {
93 char *name = ralloc_strdup(NULL, var->name);
94 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
95 false, record_array_count, NULL);
96 ralloc_free(name);
97 } else if (t_without_array->is_interface()) {
98 char *name = ralloc_strdup(NULL, t_without_array->name);
99 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
100 &t_without_array->
101 fields.structure[t_without_array->field_index(var->name)] : NULL;
102
103 recursion(t, &name, strlen(name), row_major, NULL, packing,
104 false, record_array_count, ifc_member);
105 ralloc_free(name);
106 } else {
107 this->set_record_array_count(record_array_count);
108 this->visit_field(t, var->name, row_major, NULL, packing, false);
109 }
110 }
111
112 void
113 program_resource_visitor::recursion(const glsl_type *t, char **name,
114 size_t name_length, bool row_major,
115 const glsl_type *record_type,
116 const enum glsl_interface_packing packing,
117 bool last_field,
118 unsigned record_array_count,
119 const glsl_struct_field *named_ifc_member)
120 {
121 /* Records need to have each field processed individually.
122 *
123 * Arrays of records need to have each array element processed
124 * individually, then each field of the resulting array elements processed
125 * individually.
126 */
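/* An illustrative sketch (hypothetical GLSL, not taken from this file): a
 * declaration such as
 *
 *    uniform struct { float f; vec4 v[3]; } s[2];
 *
 * is flattened by this recursion into four leaf visits named
 *
 *    "s[0].f", "s[0].v", "s[1].f", "s[1].v"
 *
 * with record_array_count == 2: each element of the struct array is expanded
 * individually, while the trailing vec4 array remains a single leaf whose
 * element count is handled by visit_field().
 */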
127 if (t->is_interface() && named_ifc_member) {
128 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
129 named_ifc_member->name);
130 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
131 packing, false, record_array_count, NULL);
132 } else if (t->is_struct() || t->is_interface()) {
133 if (record_type == NULL && t->is_struct())
134 record_type = t;
135
136 if (t->is_struct())
137 this->enter_record(t, *name, row_major, packing);
138
139 for (unsigned i = 0; i < t->length; i++) {
140 const char *field = t->fields.structure[i].name;
141 size_t new_length = name_length;
142
143 if (t->is_interface() && t->fields.structure[i].offset != -1)
144 this->set_buffer_offset(t->fields.structure[i].offset);
145
146 /* Append '.field' to the current variable name. */
147 if (name_length == 0) {
148 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
149 } else {
150 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
151 }
152
153 /* The layout of structures at the top level of the block is set
154 * during parsing. For matrices contained in multiple levels of
155 * structures in the block, the inner structures have no layout.
156 * These cases must potentially inherit the layout from the outer
157 * levels.
158 */
159 bool field_row_major = row_major;
160 const enum glsl_matrix_layout matrix_layout =
161 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
162 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
163 field_row_major = true;
164 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
165 field_row_major = false;
166 }
167
168 recursion(t->fields.structure[i].type, name, new_length,
169 field_row_major,
170 record_type,
171 packing,
172 (i + 1) == t->length, record_array_count, NULL);
173
174 /* Only the first leaf-field of the record gets called with the
175 * record type pointer.
176 */
177 record_type = NULL;
178 }
179
180 if (t->is_struct()) {
181 (*name)[name_length] = '\0';
182 this->leave_record(t, *name, row_major, packing);
183 }
184 } else if (t->without_array()->is_struct() ||
185 t->without_array()->is_interface() ||
186 (t->is_array() && t->fields.array->is_array())) {
187 if (record_type == NULL && t->fields.array->is_struct())
188 record_type = t->fields.array;
189
190 unsigned length = t->length;
191
192 /* Shader storage block unsized arrays: add subscript [0] to variable
193 * names.
194 */
195 if (t->is_unsized_array())
196 length = 1;
197
198 record_array_count *= length;
199
200 for (unsigned i = 0; i < length; i++) {
201 size_t new_length = name_length;
202
203 /* Append the subscript to the current variable name */
204 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
205
206 recursion(t->fields.array, name, new_length, row_major,
207 record_type,
208 packing,
209 (i + 1) == t->length, record_array_count,
210 named_ifc_member);
211
212 /* Only the first leaf-field of the record gets called with the
213 * record type pointer.
214 */
215 record_type = NULL;
216 }
217 } else {
218 this->set_record_array_count(record_array_count);
219 this->visit_field(t, *name, row_major, record_type, packing, last_field);
220 }
221 }
222
223 void
224 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
225 const enum glsl_interface_packing)
226 {
227 }
228
229 void
230 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
231 const enum glsl_interface_packing)
232 {
233 }
234
235 void
236 program_resource_visitor::set_buffer_offset(unsigned)
237 {
238 }
239
240 void
241 program_resource_visitor::set_record_array_count(unsigned)
242 {
243 }
244
245 namespace {
246
247 /**
248 * Class to help calculate the storage requirements for a set of uniforms
249 *
250 * As uniforms are added to the active set the number of active uniforms and
251 * the storage requirements for those uniforms are accumulated. The active
252 * uniforms are added to the hash table supplied to the constructor.
253 *
254 * If the same uniform is added multiple times (i.e., once for each shader
255 * target), it will only be counted once.
256 */
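/* A minimal usage sketch, mirroring how link_assign_uniform_locations() below
 * drives this class (names come from this file; the loops are abbreviated):
 *
 *    string_to_uint_map *map = new string_to_uint_map;
 *    string_to_uint_map *hidden = new string_to_uint_map;
 *    count_uniform_size uniform_size(map, hidden, use_std430_as_default);
 *
 *    // for each linked stage:
 *    uniform_size.start_shader();
 *    //    for each ir_var_uniform / ir_var_shader_storage variable:
 *    uniform_size.process(var);
 *
 * Afterwards num_active_uniforms and num_values size the gl_uniform_storage
 * and gl_constant_value arrays that the parceling pass fills in.
 */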
257 class count_uniform_size : public program_resource_visitor {
258 public:
259 count_uniform_size(struct string_to_uint_map *map,
260 struct string_to_uint_map *hidden_map,
261 bool use_std430_as_default)
262 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
263 num_shader_samplers(0), num_shader_images(0),
264 num_shader_uniform_components(0), num_shader_subroutines(0),
265 is_buffer_block(false), is_shader_storage(false), map(map),
266 hidden_map(hidden_map), current_var(NULL),
267 use_std430_as_default(use_std430_as_default)
268 {
269 /* empty */
270 }
271
272 void start_shader()
273 {
274 this->num_shader_samplers = 0;
275 this->num_shader_images = 0;
276 this->num_shader_uniform_components = 0;
277 this->num_shader_subroutines = 0;
278 }
279
280 void process(ir_variable *var)
281 {
282 this->current_var = var;
283 this->is_buffer_block = var->is_in_buffer_block();
284 this->is_shader_storage = var->is_in_shader_storage_block();
285 if (var->is_interface_instance())
286 program_resource_visitor::process(var->get_interface_type(),
287 var->get_interface_type()->name,
288 use_std430_as_default);
289 else
290 program_resource_visitor::process(var, use_std430_as_default);
291 }
292
293 /**
294 * Total number of active uniforms counted
295 */
296 unsigned num_active_uniforms;
297
298 unsigned num_hidden_uniforms;
299
300 /**
301 * Number of data values required to back the storage for the active uniforms
302 */
303 unsigned num_values;
304
305 /**
306 * Number of samplers used
307 */
308 unsigned num_shader_samplers;
309
310 /**
311 * Number of images used
312 */
313 unsigned num_shader_images;
314
315 /**
316 * Number of uniforms used in the current shader
317 */
318 unsigned num_shader_uniform_components;
319
320 /**
321 * Number of subroutine uniforms used
322 */
323 unsigned num_shader_subroutines;
324
325 bool is_buffer_block;
326 bool is_shader_storage;
327
328 struct string_to_uint_map *map;
329
330 private:
331 virtual void visit_field(const glsl_type *type, const char *name,
332 bool /* row_major */,
333 const glsl_type * /* record_type */,
334 const enum glsl_interface_packing,
335 bool /* last_field */)
336 {
337 assert(!type->without_array()->is_struct());
338 assert(!type->without_array()->is_interface());
339 assert(!(type->is_array() && type->fields.array->is_array()));
340
341 /* Count the number of samplers regardless of whether the uniform is
342 * already in the hash table. The hash table prevents adding the same
343 * uniform for multiple shader targets, but in this case we want to
344 * count it for each shader target.
345 */
346 const unsigned values = type->component_slots();
347 if (type->contains_subroutine()) {
348 this->num_shader_subroutines += values;
349 } else if (type->contains_sampler() && !current_var->data.bindless) {
350 /* Samplers (bound or bindless) are counted as two components as
351 * specified by ARB_bindless_texture. */
352 this->num_shader_samplers += values / 2;
353 } else if (type->contains_image() && !current_var->data.bindless) {
354 /* Images (bound or bindless) are counted as two components as
355 * specified by ARB_bindless_texture. */
356 this->num_shader_images += values / 2;
357
358 /* As drivers are likely to represent image uniforms as
359 * scalar indices, count them against the limit of uniform
360 * components in the default block. The spec allows image
361 * uniforms to use up no more than one scalar slot.
362 */
363 if (!is_shader_storage)
364 this->num_shader_uniform_components += values;
365 } else {
366 /* Accumulate the total number of uniform slots used by this shader.
367 * Note that samplers do not count against this limit because they
368 * don't use any storage on current hardware.
369 */
370 if (!is_buffer_block)
371 this->num_shader_uniform_components += values;
372 }
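      /* Worked example (hypothetical declarations): `uniform sampler2D s[4];`
       * has component_slots() == 8, since each sampler counts as two
       * components per the ARB_bindless_texture note above, so it adds 4 to
       * num_shader_samplers; `uniform vec4 v[3];` has component_slots() == 12
       * and adds 12 to num_shader_uniform_components.
       */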
373
374 /* If the uniform is already in the map, there's nothing more to do.
375 */
376 unsigned id;
377 if (this->map->get(id, name))
378 return;
379
380 if (this->current_var->data.how_declared == ir_var_hidden) {
381 this->hidden_map->put(this->num_hidden_uniforms, name);
382 this->num_hidden_uniforms++;
383 } else {
384 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
385 name);
386 }
387
388 /* Each leaf uniform occupies one entry in the list of active
389 * uniforms.
390 */
391 this->num_active_uniforms++;
392
393 if(!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
394 this->num_values += values;
395 }
396
397 struct string_to_uint_map *hidden_map;
398
399 /**
400 * Current variable being processed.
401 */
402 ir_variable *current_var;
403
404 bool use_std430_as_default;
405 };
406
407 } /* anonymous namespace */
408
409 unsigned
410 link_calculate_matrix_stride(const glsl_type *matrix, bool row_major,
411 enum glsl_interface_packing packing)
412 {
413 const unsigned N = matrix->is_double() ? 8 : 4;
414 const unsigned items =
415 row_major ? matrix->matrix_columns : matrix->vector_elements;
416
417 assert(items <= 4);
418
419 /* The matrix stride for std430 mat2xY matrices is not rounded up to
420 * vec4 size.
421 *
422 * Section 7.6.2.2 "Standard Uniform Block Layout" of the OpenGL 4.3 spec
423 * says:
424 *
425 * 2. If the member is a two- or four-component vector with components
426 * consuming N basic machine units, the base alignment is 2N or 4N,
427 * respectively.
428 * ...
429 * 4. If the member is an array of scalars or vectors, the base
430 * alignment and array stride are set to match the base alignment of
431 * a single array element, according to rules (1), (2), and (3), and
432 * rounded up to the base alignment of a vec4.
433 * ...
434 * 7. If the member is a row-major matrix with C columns and R rows, the
435 * matrix is stored identically to an array of R row vectors with C
436 * components each, according to rule (4).
437 * ...
438 *
439 * When using the std430 storage layout, shader storage blocks will be
440 * laid out in buffer storage identically to uniform and shader storage
441 * blocks using the std140 layout, except that the base alignment and
442 * stride of arrays of scalars and vectors in rule 4 and of structures
443 * in rule 9 are not rounded up a multiple of the base alignment of a
444 * vec4.
445 */
446 return packing == GLSL_INTERFACE_PACKING_STD430
447 ? (items < 3 ? items * N : glsl_align(items * N, 16))
448 : glsl_align(items * N, 16);
449 }
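/* A few worked values for the expression above: with float matrices (N == 4),
 * items == 2 (e.g. a column-major mat2) yields a stride of 8 bytes under
 * std430 but glsl_align(8, 16) == 16 bytes under std140, while items of 3 or
 * 4 yield a 16-byte stride under both layouts.  Double-precision matrices
 * (N == 8) already reach 16 bytes at items == 2, so their strides match under
 * both layouts as well.
 */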
450
451 /**
452 * Class to help parcel out pieces of backing storage to uniforms
453 *
454 * Each uniform processed has some range of the \c gl_constant_value
455 * structures associated with it. The association is done by finding
456 * the uniform in the \c string_to_uint_map and using the value from
457 * the map to connect that slot in the \c gl_uniform_storage table
458 * with the next available slot in the \c gl_constant_value array.
459 *
460 * \warning
461 * This class assumes that every uniform that will be processed is
462 * already in the \c string_to_uint_map. In addition, it assumes that
463 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
464 * enough."
465 */
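/* A minimal usage sketch, following link_assign_uniform_storage() below (the
 * names come from this file; error handling and the loops are abbreviated):
 *
 *    parcel_out_uniform_storage parcel(prog, prog->UniformHash,
 *                                      prog->data->UniformStorage, data,
 *                                      use_std430_as_default);
 *
 *    // for each linked stage i:
 *    parcel.start_shader((gl_shader_stage) i);
 *    //    for each uniform / shader-storage ir_variable:
 *    parcel.set_and_process(var);
 *
 * After each stage, shader_samplers_used, targets[] and the other public
 * members are copied into that stage's gl_program.
 */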
466 class parcel_out_uniform_storage : public program_resource_visitor {
467 public:
468 parcel_out_uniform_storage(struct gl_shader_program *prog,
469 struct string_to_uint_map *map,
470 struct gl_uniform_storage *uniforms,
471 union gl_constant_value *values,
472 bool use_std430_as_default)
473 : prog(prog), map(map), uniforms(uniforms),
474 use_std430_as_default(use_std430_as_default), values(values),
475 bindless_targets(NULL), bindless_access(NULL)
476 {
477 }
478
479 virtual ~parcel_out_uniform_storage()
480 {
481 free(this->bindless_targets);
482 free(this->bindless_access);
483 }
484
485 void start_shader(gl_shader_stage shader_type)
486 {
487 assert(shader_type < MESA_SHADER_STAGES);
488 this->shader_type = shader_type;
489
490 this->shader_samplers_used = 0;
491 this->shader_shadow_samplers = 0;
492 this->next_sampler = 0;
493 this->next_image = 0;
494 this->next_subroutine = 0;
495 this->record_array_count = 1;
496 memset(this->targets, 0, sizeof(this->targets));
497
498 this->num_bindless_samplers = 0;
499 this->next_bindless_sampler = 0;
500 free(this->bindless_targets);
501 this->bindless_targets = NULL;
502
503 this->num_bindless_images = 0;
504 this->next_bindless_image = 0;
505 free(this->bindless_access);
506 this->bindless_access = NULL;
507 this->shader_storage_blocks_write_access = 0;
508 }
509
510 void set_and_process(ir_variable *var)
511 {
512 current_var = var;
513 field_counter = 0;
514 this->record_next_sampler = new string_to_uint_map;
515 this->record_next_bindless_sampler = new string_to_uint_map;
516 this->record_next_image = new string_to_uint_map;
517 this->record_next_bindless_image = new string_to_uint_map;
518
519 buffer_block_index = -1;
520 if (var->is_in_buffer_block()) {
521 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
522 prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
523 unsigned num_blks = var->is_in_shader_storage_block() ?
524 prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
525 bool is_interface_array =
526 var->is_interface_instance() && var->type->is_array();
527
528 if (is_interface_array) {
529 unsigned l = strlen(var->get_interface_type()->name);
530
531 for (unsigned i = 0; i < num_blks; i++) {
532 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
533 == 0 && blks[i].Name[l] == '[') {
534 buffer_block_index = i;
535 break;
536 }
537 }
538 } else {
539 for (unsigned i = 0; i < num_blks; i++) {
540 if (strcmp(var->get_interface_type()->name, blks[i].Name) == 0) {
541 buffer_block_index = i;
542 break;
543 }
544 }
545 }
546 assert(buffer_block_index != -1);
547
548 if (var->is_in_shader_storage_block() &&
549 !var->data.memory_read_only) {
550 unsigned array_size = is_interface_array ?
551 var->type->array_size() : 1;
552
553 STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);
554
555 /* Shaders that use too many SSBOs will fail to compile, which we don't
556 * care about; the static assert above guarantees that every valid block
557 * index fits in this 32-bit mask.
558 * Only record write access for the block indices that actually fit:
559 */
560 if (buffer_block_index + array_size <= 32) {
561 shader_storage_blocks_write_access |=
562 u_bit_consecutive(buffer_block_index, array_size);
563 }
564 }
565
566 /* Uniform blocks that were specified with an instance name must be
567 * handled a little bit differently. The name of the variable is the
568 * name used to reference the uniform block instead of being the name
569 * of a variable within the block. Therefore, searching for the name
570 * within the block will fail.
571 */
572 if (var->is_interface_instance()) {
573 ubo_byte_offset = 0;
574 process(var->get_interface_type(),
575 var->get_interface_type()->name,
576 use_std430_as_default);
577 } else {
578 const struct gl_uniform_block *const block =
579 &blks[buffer_block_index];
580
581 assert(var->data.location != -1);
582
583 const struct gl_uniform_buffer_variable *const ubo_var =
584 &block->Uniforms[var->data.location];
585
586 ubo_byte_offset = ubo_var->Offset;
587 process(var, use_std430_as_default);
588 }
589 } else {
590 /* Store any explicit location and reset data location so we can
591 * reuse this variable for storing the uniform slot number.
592 */
593 this->explicit_location = current_var->data.location;
594 current_var->data.location = -1;
595
596 process(var, use_std430_as_default);
597 }
598 delete this->record_next_sampler;
599 delete this->record_next_bindless_sampler;
600 delete this->record_next_image;
601 delete this->record_next_bindless_image;
602 }
603
604 int buffer_block_index;
605 int ubo_byte_offset;
606 gl_shader_stage shader_type;
607
608 private:
609 bool set_opaque_indices(const glsl_type *base_type,
610 struct gl_uniform_storage *uniform,
611 const char *name, unsigned &next_index,
612 struct string_to_uint_map *record_next_index)
613 {
614 assert(base_type->is_sampler() || base_type->is_image());
615
616 if (this->record_array_count > 1) {
617 unsigned inner_array_size = MAX2(1, uniform->array_elements);
618 char *name_copy = ralloc_strdup(NULL, name);
619
620 /* Remove all array subscripts from the sampler/image name */
621 char *str_start;
622 const char *str_end;
623 while((str_start = strchr(name_copy, '[')) &&
624 (str_end = strchr(name_copy, ']'))) {
625 memmove(str_start, str_end + 1, 1 + strlen(str_end + 1));
626 }
627
628 unsigned index = 0;
629 if (record_next_index->get(index, name_copy)) {
630 /* In this case, we've already seen this uniform so we just use the
631 * next sampler/image index recorded the last time we visited.
632 */
633 uniform->opaque[shader_type].index = index;
634 index = inner_array_size + uniform->opaque[shader_type].index;
635 record_next_index->put(index, name_copy);
636
637 ralloc_free(name_copy);
638 /* Return as everything else has already been initialised in a
639 * previous pass.
640 */
641 return false;
642 } else {
643 /* We've never seen this uniform before so we need to allocate
644 * enough indices to store it.
645 *
646 * Nested struct arrays behave like arrays of arrays so we need to
647 * increase the index by the total number of elements of the
648 * sampler/image in case there is more than one sampler/image
649 * inside the structs. This allows the offset to be easily
650 * calculated for indirect indexing.
651 */
652 uniform->opaque[shader_type].index = next_index;
653 next_index += inner_array_size * this->record_array_count;
654
655 /* Store the next index for future passes over the struct array
656 */
657 index = uniform->opaque[shader_type].index + inner_array_size;
658 record_next_index->put(index, name_copy);
659 ralloc_free(name_copy);
660 }
661 } else {
662 /* Increment the sampler/image by 1 for non-arrays and by the number
663 * of array elements for arrays.
664 */
665 uniform->opaque[shader_type].index = next_index;
666 next_index += MAX2(1, uniform->array_elements);
667 }
668 return true;
669 }
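   /* Worked example for the struct-array path above (hypothetical GLSL):
    * given `struct S { sampler2D a; sampler2D b; }; uniform S s[2];`,
    * record_array_count is 2, so the first visit of "s[0].a" reserves
    * indices 0..1 for all the `a` leaves and the first visit of "s[0].b"
    * reserves indices 2..3.  Later visits pick up the recorded next index,
    * giving s[0].a -> 0, s[1].a -> 1, s[0].b -> 2, s[1].b -> 3; i.e.
    * s[i].a lives at 0 + i and s[i].b at 2 + i, which is what makes the
    * offset for indirect indexing easy to compute.
    */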
670
671 void handle_samplers(const glsl_type *base_type,
672 struct gl_uniform_storage *uniform, const char *name)
673 {
674 if (base_type->is_sampler()) {
675 uniform->opaque[shader_type].active = true;
676
677 const gl_texture_index target = base_type->sampler_index();
678 const unsigned shadow = base_type->sampler_shadow;
679
680 if (current_var->data.bindless) {
681 if (!set_opaque_indices(base_type, uniform, name,
682 this->next_bindless_sampler,
683 this->record_next_bindless_sampler))
684 return;
685
686 this->num_bindless_samplers = this->next_bindless_sampler;
687
688 this->bindless_targets = (gl_texture_index *)
689 realloc(this->bindless_targets,
690 this->num_bindless_samplers * sizeof(gl_texture_index));
691
692 for (unsigned i = uniform->opaque[shader_type].index;
693 i < this->num_bindless_samplers;
694 i++) {
695 this->bindless_targets[i] = target;
696 }
697 } else {
698 if (!set_opaque_indices(base_type, uniform, name,
699 this->next_sampler,
700 this->record_next_sampler))
701 return;
702
703 for (unsigned i = uniform->opaque[shader_type].index;
704 i < MIN2(this->next_sampler, MAX_SAMPLERS);
705 i++) {
706 this->targets[i] = target;
707 this->shader_samplers_used |= 1U << i;
708 this->shader_shadow_samplers |= shadow << i;
709 }
710 }
711 }
712 }
713
714 void handle_images(const glsl_type *base_type,
715 struct gl_uniform_storage *uniform, const char *name)
716 {
717 if (base_type->is_image()) {
718 uniform->opaque[shader_type].active = true;
719
720 /* Set image access qualifiers */
721 const GLenum access =
722 current_var->data.memory_read_only ?
723 (current_var->data.memory_write_only ? GL_NONE :
724 GL_READ_ONLY) :
725 (current_var->data.memory_write_only ? GL_WRITE_ONLY :
726 GL_READ_WRITE);
727
728 if (current_var->data.bindless) {
729 if (!set_opaque_indices(base_type, uniform, name,
730 this->next_bindless_image,
731 this->record_next_bindless_image))
732 return;
733
734 this->num_bindless_images = this->next_bindless_image;
735
736 this->bindless_access = (GLenum *)
737 realloc(this->bindless_access,
738 this->num_bindless_images * sizeof(GLenum));
739
740 for (unsigned i = uniform->opaque[shader_type].index;
741 i < this->num_bindless_images;
742 i++) {
743 this->bindless_access[i] = access;
744 }
745 } else {
746 if (!set_opaque_indices(base_type, uniform, name,
747 this->next_image,
748 this->record_next_image))
749 return;
750
751 for (unsigned i = uniform->opaque[shader_type].index;
752 i < MIN2(this->next_image, MAX_IMAGE_UNIFORMS);
753 i++) {
754 prog->_LinkedShaders[shader_type]->Program->sh.ImageAccess[i] = access;
755 }
756 }
757 }
758 }
759
760 void handle_subroutines(const glsl_type *base_type,
761 struct gl_uniform_storage *uniform)
762 {
763 if (base_type->is_subroutine()) {
764 uniform->opaque[shader_type].index = this->next_subroutine;
765 uniform->opaque[shader_type].active = true;
766
767 prog->_LinkedShaders[shader_type]->Program->sh.NumSubroutineUniforms++;
768
769 /* Increment the subroutine index by 1 for non-arrays and by the
770 * number of array elements for arrays.
771 */
772 this->next_subroutine += MAX2(1, uniform->array_elements);
773
774 }
775 }
776
777 virtual void set_buffer_offset(unsigned offset)
778 {
779 this->ubo_byte_offset = offset;
780 }
781
782 virtual void set_record_array_count(unsigned record_array_count)
783 {
784 this->record_array_count = record_array_count;
785 }
786
787 virtual void enter_record(const glsl_type *type, const char *,
788 bool row_major,
789 const enum glsl_interface_packing packing)
790 {
791 assert(type->is_struct());
792 if (this->buffer_block_index == -1)
793 return;
794 if (packing == GLSL_INTERFACE_PACKING_STD430)
795 this->ubo_byte_offset = glsl_align(
796 this->ubo_byte_offset, type->std430_base_alignment(row_major));
797 else
798 this->ubo_byte_offset = glsl_align(
799 this->ubo_byte_offset, type->std140_base_alignment(row_major));
800 }
801
802 virtual void leave_record(const glsl_type *type, const char *,
803 bool row_major,
804 const enum glsl_interface_packing packing)
805 {
806 assert(type->is_struct());
807 if (this->buffer_block_index == -1)
808 return;
809 if (packing == GLSL_INTERFACE_PACKING_STD430)
810 this->ubo_byte_offset = glsl_align(
811 this->ubo_byte_offset, type->std430_base_alignment(row_major));
812 else
813 this->ubo_byte_offset = glsl_align(
814 this->ubo_byte_offset, type->std140_base_alignment(row_major));
815 }
816
817 virtual void visit_field(const glsl_type *type, const char *name,
818 bool row_major, const glsl_type * /* record_type */,
819 const enum glsl_interface_packing packing,
820 bool /* last_field */)
821 {
822 assert(!type->without_array()->is_struct());
823 assert(!type->without_array()->is_interface());
824 assert(!(type->is_array() && type->fields.array->is_array()));
825
826 unsigned id;
827 bool found = this->map->get(id, name);
828 assert(found);
829
830 if (!found)
831 return;
832
833 const glsl_type *base_type;
834 if (type->is_array()) {
835 this->uniforms[id].array_elements = type->length;
836 base_type = type->fields.array;
837 } else {
838 this->uniforms[id].array_elements = 0;
839 base_type = type;
840 }
841
842 /* Initialise opaque data */
843 this->uniforms[id].opaque[shader_type].index = ~0;
844 this->uniforms[id].opaque[shader_type].active = false;
845
846 this->uniforms[id].active_shader_mask |= 1 << shader_type;
847
848 /* This assigns uniform indices to sampler and image uniforms. */
849 handle_samplers(base_type, &this->uniforms[id], name);
850 handle_images(base_type, &this->uniforms[id], name);
851 handle_subroutines(base_type, &this->uniforms[id]);
852
853 /* For array of arrays or struct arrays the base location may have
854 * already been set so don't set it again.
855 */
856 if (buffer_block_index == -1 && current_var->data.location == -1) {
857 current_var->data.location = id;
858 }
859
860 /* If there is already storage associated with this uniform or if the
861 * uniform is set as builtin, it means that it was set while processing
862 * an earlier shader stage. For example, we may be processing the
863 * uniform in the fragment shader, but the uniform was already processed
864 * in the vertex shader.
865 */
866 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
867 return;
868 }
869
870 /* Assign explicit locations. */
871 if (current_var->data.explicit_location) {
872 /* Set sequential locations for struct fields. */
873 if (current_var->type->without_array()->is_struct() ||
874 current_var->type->is_array_of_arrays()) {
875 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
876 this->uniforms[id].remap_location =
877 this->explicit_location + field_counter;
878 field_counter += entries;
879 } else {
880 this->uniforms[id].remap_location = this->explicit_location;
881 }
882 } else {
883 /* Initialize to indicate that no location is set */
884 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
885 }
886
887 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
888 this->uniforms[id].type = base_type;
889 this->uniforms[id].num_driver_storage = 0;
890 this->uniforms[id].driver_storage = NULL;
891 this->uniforms[id].atomic_buffer_index = -1;
892 this->uniforms[id].hidden =
893 current_var->data.how_declared == ir_var_hidden;
894 this->uniforms[id].builtin = is_gl_identifier(name);
895
896 this->uniforms[id].is_shader_storage =
897 current_var->is_in_shader_storage_block();
898 this->uniforms[id].is_bindless = current_var->data.bindless;
899
900 /* Do not assign storage if the uniform is a builtin or buffer object */
901 if (!this->uniforms[id].builtin &&
902 !this->uniforms[id].is_shader_storage &&
903 this->buffer_block_index == -1)
904 this->uniforms[id].storage = this->values;
905
906 if (this->buffer_block_index != -1) {
907 this->uniforms[id].block_index = this->buffer_block_index;
908
909 unsigned alignment = type->std140_base_alignment(row_major);
910 if (packing == GLSL_INTERFACE_PACKING_STD430)
911 alignment = type->std430_base_alignment(row_major);
912 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
913 this->uniforms[id].offset = this->ubo_byte_offset;
914 if (packing == GLSL_INTERFACE_PACKING_STD430)
915 this->ubo_byte_offset += type->std430_size(row_major);
916 else
917 this->ubo_byte_offset += type->std140_size(row_major);
918
919 if (type->is_array()) {
920 if (packing == GLSL_INTERFACE_PACKING_STD430)
921 this->uniforms[id].array_stride =
922 type->without_array()->std430_array_stride(row_major);
923 else
924 this->uniforms[id].array_stride =
925 glsl_align(type->without_array()->std140_size(row_major),
926 16);
927 } else {
928 this->uniforms[id].array_stride = 0;
929 }
930
931 if (type->without_array()->is_matrix()) {
932 this->uniforms[id].matrix_stride =
933 link_calculate_matrix_stride(type->without_array(),
934 row_major,
935 packing);
936 this->uniforms[id].row_major = row_major;
937 } else {
938 this->uniforms[id].matrix_stride = 0;
939 this->uniforms[id].row_major = false;
940 }
941 } else {
942 this->uniforms[id].block_index = -1;
943 this->uniforms[id].offset = -1;
944 this->uniforms[id].array_stride = -1;
945 this->uniforms[id].matrix_stride = -1;
946 this->uniforms[id].row_major = false;
947 }
948
949 if (!this->uniforms[id].builtin &&
950 !this->uniforms[id].is_shader_storage &&
951 this->buffer_block_index == -1)
952 this->values += type->component_slots();
953 }
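   /* Worked example for the block-offset bookkeeping above (hypothetical
    * GLSL): for `layout(std140) uniform U { float f; vec3 v; } u;` the
    * member "U.f" is aligned to 4 and placed at offset 0, advancing
    * ubo_byte_offset to 4, while "U.v" has a base alignment of 16 and so
    * lands at offset 16.  std430 gives the same offsets here; the two
    * layouts mainly diverge for arrays, nested structs and mat2-style
    * matrices, whose std140 strides are padded up to vec4 multiples.
    */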
954
955 /**
956 * Current program being processed.
957 */
958 struct gl_shader_program *prog;
959
960 struct string_to_uint_map *map;
961
962 struct gl_uniform_storage *uniforms;
963 unsigned next_sampler;
964 unsigned next_bindless_sampler;
965 unsigned next_image;
966 unsigned next_bindless_image;
967 unsigned next_subroutine;
968
969 bool use_std430_as_default;
970
971 /**
972 * Field counter is used to take care that uniform structures
973 * with explicit locations get sequential locations.
974 */
975 unsigned field_counter;
976
977 /**
978 * Current variable being processed.
979 */
980 ir_variable *current_var;
981
982 /* Used to store the explicit location from current_var so that we can
983 * reuse the location field for storing the uniform slot id.
984 */
985 int explicit_location;
986
987 /* Stores total struct array elements including nested structs */
988 unsigned record_array_count;
989
990 /* Map for temporarily storing next sampler index when handling samplers in
991 * struct arrays.
992 */
993 struct string_to_uint_map *record_next_sampler;
994
995 /* Map for temporarily storing next image index when handling images in
996 * struct arrays.
997 */
998 struct string_to_uint_map *record_next_image;
999
1000 /* Map for temporarily storing next bindless sampler index when handling
1001 * bindless samplers in struct arrays.
1002 */
1003 struct string_to_uint_map *record_next_bindless_sampler;
1004
1005 /* Map for temporarily storing next bindless image index when handling
1006 * bindless images in struct arrays.
1007 */
1008 struct string_to_uint_map *record_next_bindless_image;
1009
1010 public:
1011 union gl_constant_value *values;
1012
1013 gl_texture_index targets[MAX_SAMPLERS];
1014
1015 /**
1016 * Mask of samplers used by the current shader stage.
1017 */
1018 unsigned shader_samplers_used;
1019
1020 /**
1021 * Mask of samplers used by the current shader stage for shadows.
1022 */
1023 unsigned shader_shadow_samplers;
1024
1025 /**
1026 * Number of bindless samplers used by the current shader stage.
1027 */
1028 unsigned num_bindless_samplers;
1029
1030 /**
1031 * Texture targets for bindless samplers used by the current stage.
1032 */
1033 gl_texture_index *bindless_targets;
1034
1035 /**
1036 * Number of bindless images used by the current shader stage.
1037 */
1038 unsigned num_bindless_images;
1039
1040 /**
1041 * Access types for bindless images used by the current stage.
1042 */
1043 GLenum *bindless_access;
1044
1045 /**
1046 * Bitmask of shader storage blocks not declared as read-only.
1047 */
1048 unsigned shader_storage_blocks_write_access;
1049 };
1050
1051 static bool
1052 variable_is_referenced(ir_array_refcount_visitor &v, ir_variable *var)
1053 {
1054 ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1055
1056 return entry->is_referenced;
1057
1058 }
1059
1060 /**
1061 * Walks the IR and updates the references to uniform blocks in the
1062 * ir_variables to point at linked shader's list (previously, they
1063 * would point at the uniform block list in one of the pre-linked
1064 * shaders).
1065 */
1066 static void
1067 link_update_uniform_buffer_variables(struct gl_linked_shader *shader,
1068 unsigned stage)
1069 {
1070 ir_array_refcount_visitor v;
1071
1072 v.run(shader->ir);
1073
1074 foreach_in_list(ir_instruction, node, shader->ir) {
1075 ir_variable *const var = node->as_variable();
1076
1077 if (var == NULL || !var->is_in_buffer_block())
1078 continue;
1079
1080 assert(var->data.mode == ir_var_uniform ||
1081 var->data.mode == ir_var_shader_storage);
1082
1083 unsigned num_blocks = var->data.mode == ir_var_uniform ?
1084 shader->Program->info.num_ubos : shader->Program->info.num_ssbos;
1085 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
1086 shader->Program->sh.UniformBlocks :
1087 shader->Program->sh.ShaderStorageBlocks;
1088
1089 if (var->is_interface_instance()) {
1090 const ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1091
1092 if (entry->is_referenced) {
1093 /* Since this is an interface instance, the instance type will be
1094 * same as the array-stripped variable type. If the variable type
1095 * is an array, then the block names will be suffixed with [0]
1096 * through [n-1]. Unlike for non-interface instances, there will
1097 * not be structure types here, so the only name sentinel that we
1098 * have to worry about is [.
1099 */
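            /* For example (hypothetical GLSL), `uniform Blk { vec4 v; } inst[4];`
             * produces the block names "Blk[0]" through "Blk[3]", so the
             * sentinel is '[' and each block name is compared against the
             * interface type name "Blk" up to that character.
             */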
1100 assert(var->type->without_array() == var->get_interface_type());
1101 const char sentinel = var->type->is_array() ? '[' : '\0';
1102
1103 const ptrdiff_t len = strlen(var->get_interface_type()->name);
1104 for (unsigned i = 0; i < num_blocks; i++) {
1105 const char *const begin = blks[i]->Name;
1106 const char *const end = strchr(begin, sentinel);
1107
1108 if (end == NULL)
1109 continue;
1110
1111 if (len != (end - begin))
1112 continue;
1113
1114 /* Even when a match is found, do not "break" here. This could
1115 * be an array of instances, and all elements of the array need
1116 * to be marked as referenced.
1117 */
1118 if (strncmp(begin, var->get_interface_type()->name, len) == 0 &&
1119 (!var->type->is_array() ||
1120 entry->is_linearized_index_referenced(blks[i]->linearized_array_index))) {
1121 blks[i]->stageref |= 1U << stage;
1122 }
1123 }
1124 }
1125
1126 var->data.location = 0;
1127 continue;
1128 }
1129
1130 bool found = false;
1131 char sentinel = '\0';
1132
1133 if (var->type->is_struct()) {
1134 sentinel = '.';
1135 } else if (var->type->is_array() && (var->type->fields.array->is_array()
1136 || var->type->without_array()->is_struct())) {
1137 sentinel = '[';
1138 }
1139
1140 const unsigned l = strlen(var->name);
1141 for (unsigned i = 0; i < num_blocks; i++) {
1142 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
1143 if (sentinel) {
1144 const char *begin = blks[i]->Uniforms[j].Name;
1145 const char *end = strchr(begin, sentinel);
1146
1147 if (end == NULL)
1148 continue;
1149
1150 if ((ptrdiff_t) l != (end - begin))
1151 continue;
1152
1153 found = strncmp(var->name, begin, l) == 0;
1154 } else {
1155 found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
1156 }
1157
1158 if (found) {
1159 var->data.location = j;
1160
1161 if (variable_is_referenced(v, var))
1162 blks[i]->stageref |= 1U << stage;
1163
1164 break;
1165 }
1166 }
1167
1168 if (found)
1169 break;
1170 }
1171 assert(found);
1172 }
1173 }
1174
1175 /**
1176 * Combine the hidden uniform hash map with the uniform hash map so that the
1177 * hidden uniforms will be given indices at the end of the uniform storage
1178 * array.
1179 */
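/* For example, with 10 active uniforms of which 3 are hidden, the visible
 * uniforms keep slots 0..6 and this callback maps hidden ids 0..2 onto slots
 * 7..9 (hidden_uniform_start == 10 - 3 == 7).
 */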
1180 static void
1181 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
1182 void *closure)
1183 {
1184 count_uniform_size *uniform_size = (count_uniform_size *) closure;
1185 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
1186 uniform_size->num_hidden_uniforms;
1187
1188 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
1189 }
1190
1191 static void
1192 link_setup_uniform_remap_tables(struct gl_context *ctx,
1193 struct gl_shader_program *prog)
1194 {
1195 unsigned total_entries = prog->NumExplicitUniformLocations;
1196 unsigned empty_locs = prog->NumUniformRemapTable - total_entries;
1197
1198 /* Reserve all the explicit locations of the active uniforms. */
1199 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1200 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1201 prog->data->UniformStorage[i].is_shader_storage)
1202 continue;
1203
1204 if (prog->data->UniformStorage[i].remap_location !=
1205 UNMAPPED_UNIFORM_LOC) {
1206 /* How many new entries for this uniform? */
1207 const unsigned entries =
1208 MAX2(1, prog->data->UniformStorage[i].array_elements);
1209
1210 /* Set remap table entries to point to the correct gl_uniform_storage. */
1211 for (unsigned j = 0; j < entries; j++) {
1212 unsigned element_loc =
1213 prog->data->UniformStorage[i].remap_location + j;
1214 assert(prog->UniformRemapTable[element_loc] ==
1215 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1216 prog->UniformRemapTable[element_loc] =
1217 &prog->data->UniformStorage[i];
1218 }
1219 }
1220 }
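   /* For example, a uniform declared as `layout(location = 5) uniform float
    * u[3];` (hypothetical) has remap_location == 5 and array_elements == 3,
    * so the loop above claims UniformRemapTable entries 5, 6 and 7 before any
    * implicit locations are handed out below.
    */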
1221
1222 /* Reserve locations for rest of the uniforms. */
1223 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1224
1225 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1226 prog->data->UniformStorage[i].is_shader_storage)
1227 continue;
1228
1229 /* Built-in uniforms should not get any location. */
1230 if (prog->data->UniformStorage[i].builtin)
1231 continue;
1232
1233 /* Explicit ones have been set already. */
1234 if (prog->data->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1235 continue;
1236
1237 /* how many new entries for this uniform? */
1238 const unsigned entries =
1239 MAX2(1, prog->data->UniformStorage[i].array_elements);
1240
1241 /* Find an empty block in the UniformRemapTable where this uniform fits. */
1242 int chosen_location = -1;
1243
1244 if (empty_locs)
1245 chosen_location = link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);
1246
1247 /* Add new entries to the total amount for checking against
1248 * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform block
1249 * (-1), because locations of uniform block entries are not assignable.
1250 */
1251 if (prog->data->UniformStorage[i].block_index == -1)
1252 total_entries += entries;
1253
1254 if (chosen_location != -1) {
1255 empty_locs -= entries;
1256 } else {
1257 chosen_location = prog->NumUniformRemapTable;
1258
1259 /* resize remap table to fit new entries */
1260 prog->UniformRemapTable =
1261 reralloc(prog,
1262 prog->UniformRemapTable,
1263 gl_uniform_storage *,
1264 prog->NumUniformRemapTable + entries);
1265 prog->NumUniformRemapTable += entries;
1266 }
1267
1268 /* set pointers for this uniform */
1269 for (unsigned j = 0; j < entries; j++)
1270 prog->UniformRemapTable[chosen_location + j] =
1271 &prog->data->UniformStorage[i];
1272
1273 /* set the base location in remap table for the uniform */
1274 prog->data->UniformStorage[i].remap_location = chosen_location;
1275 }
1276
1277 /* Verify that the total number of entries for explicit and implicit
1278 * locations is within MAX_UNIFORM_LOCATIONS.
1279 */
1280
1281 if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
1282 linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
1283 "(%u > %u)", total_entries,
1284 ctx->Const.MaxUserAssignableUniformLocations);
1285 }
1286
1287 /* Reserve all the explicit locations of the active subroutine uniforms. */
1288 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1289 if (!prog->data->UniformStorage[i].type->is_subroutine())
1290 continue;
1291
1292 if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
1293 continue;
1294
1295 /* How many new entries for this uniform? */
1296 const unsigned entries =
1297 MAX2(1, prog->data->UniformStorage[i].array_elements);
1298
1299 unsigned mask = prog->data->linked_stages;
1300 while (mask) {
1301 const int j = u_bit_scan(&mask);
1302 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1303
1304 if (!prog->data->UniformStorage[i].opaque[j].active)
1305 continue;
1306
1307 /* Set remap table entries to point to the correct gl_uniform_storage. */
1308 for (unsigned k = 0; k < entries; k++) {
1309 unsigned element_loc =
1310 prog->data->UniformStorage[i].remap_location + k;
1311 assert(p->sh.SubroutineUniformRemapTable[element_loc] ==
1312 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1313 p->sh.SubroutineUniformRemapTable[element_loc] =
1314 &prog->data->UniformStorage[i];
1315 }
1316 }
1317 }
1318
1319 /* reserve subroutine locations */
1320 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1321 if (!prog->data->UniformStorage[i].type->is_subroutine())
1322 continue;
1323
1324 if (prog->data->UniformStorage[i].remap_location !=
1325 UNMAPPED_UNIFORM_LOC)
1326 continue;
1327
1328 const unsigned entries =
1329 MAX2(1, prog->data->UniformStorage[i].array_elements);
1330
1331 unsigned mask = prog->data->linked_stages;
1332 while (mask) {
1333 const int j = u_bit_scan(&mask);
1334 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1335
1336 if (!prog->data->UniformStorage[i].opaque[j].active)
1337 continue;
1338
1339 p->sh.SubroutineUniformRemapTable =
1340 reralloc(p,
1341 p->sh.SubroutineUniformRemapTable,
1342 gl_uniform_storage *,
1343 p->sh.NumSubroutineUniformRemapTable + entries);
1344
1345 for (unsigned k = 0; k < entries; k++) {
1346 p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
1347 &prog->data->UniformStorage[i];
1348 }
1349 prog->data->UniformStorage[i].remap_location =
1350 p->sh.NumSubroutineUniformRemapTable;
1351 p->sh.NumSubroutineUniformRemapTable += entries;
1352 }
1353 }
1354 }
1355
1356 static void
1357 link_assign_uniform_storage(struct gl_context *ctx,
1358 struct gl_shader_program *prog,
1359 const unsigned num_data_slots)
1360 {
1361 /* On the outside chance that there were no uniforms, bail out.
1362 */
1363 if (prog->data->NumUniformStorage == 0)
1364 return;
1365
1366 unsigned int boolean_true = ctx->Const.UniformBooleanTrue;
1367
1368 union gl_constant_value *data;
1369 if (prog->data->UniformStorage == NULL) {
1370 prog->data->UniformStorage = rzalloc_array(prog->data,
1371 struct gl_uniform_storage,
1372 prog->data->NumUniformStorage);
1373 data = rzalloc_array(prog->data->UniformStorage,
1374 union gl_constant_value, num_data_slots);
1375 prog->data->UniformDataDefaults =
1376 rzalloc_array(prog->data->UniformStorage,
1377 union gl_constant_value, num_data_slots);
1378 } else {
1379 data = prog->data->UniformDataSlots;
1380 }
1381
1382 #ifndef NDEBUG
1383 union gl_constant_value *data_end = &data[num_data_slots];
1384 #endif
1385
1386 parcel_out_uniform_storage parcel(prog, prog->UniformHash,
1387 prog->data->UniformStorage, data,
1388 ctx->Const.UseSTD430AsDefaultPacking);
1389
1390 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1391 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
1392
1393 if (!shader)
1394 continue;
1395
1396 parcel.start_shader((gl_shader_stage)i);
1397
1398 foreach_in_list(ir_instruction, node, shader->ir) {
1399 ir_variable *const var = node->as_variable();
1400
1401 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1402 var->data.mode != ir_var_shader_storage))
1403 continue;
1404
1405 parcel.set_and_process(var);
1406 }
1407
1408 shader->Program->SamplersUsed = parcel.shader_samplers_used;
1409 shader->shadow_samplers = parcel.shader_shadow_samplers;
1410 shader->Program->sh.ShaderStorageBlocksWriteAccess =
1411 parcel.shader_storage_blocks_write_access;
1412
1413 if (parcel.num_bindless_samplers > 0) {
1414 shader->Program->sh.NumBindlessSamplers = parcel.num_bindless_samplers;
1415 shader->Program->sh.BindlessSamplers =
1416 rzalloc_array(shader->Program, gl_bindless_sampler,
1417 parcel.num_bindless_samplers);
1418 for (unsigned j = 0; j < parcel.num_bindless_samplers; j++) {
1419 shader->Program->sh.BindlessSamplers[j].target =
1420 parcel.bindless_targets[j];
1421 }
1422 }
1423
1424 if (parcel.num_bindless_images > 0) {
1425 shader->Program->sh.NumBindlessImages = parcel.num_bindless_images;
1426 shader->Program->sh.BindlessImages =
1427 rzalloc_array(shader->Program, gl_bindless_image,
1428 parcel.num_bindless_images);
1429 for (unsigned j = 0; j < parcel.num_bindless_images; j++) {
1430 shader->Program->sh.BindlessImages[j].access =
1431 parcel.bindless_access[j];
1432 }
1433 }
1434
1435 STATIC_ASSERT(ARRAY_SIZE(shader->Program->sh.SamplerTargets) ==
1436 ARRAY_SIZE(parcel.targets));
1437 for (unsigned j = 0; j < ARRAY_SIZE(parcel.targets); j++)
1438 shader->Program->sh.SamplerTargets[j] = parcel.targets[j];
1439 }
1440
1441 #ifndef NDEBUG
1442 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1443 assert(prog->data->UniformStorage[i].storage != NULL ||
1444 prog->data->UniformStorage[i].builtin ||
1445 prog->data->UniformStorage[i].is_shader_storage ||
1446 prog->data->UniformStorage[i].block_index != -1);
1447 }
1448
1449 assert(parcel.values == data_end);
1450 #endif
1451
1452 link_setup_uniform_remap_tables(ctx, prog);
1453
1454 /* Set shader cache fields */
1455 prog->data->NumUniformDataSlots = num_data_slots;
1456 prog->data->UniformDataSlots = data;
1457
1458 link_set_uniform_initializers(prog, boolean_true);
1459 }
1460
1461 void
1462 link_assign_uniform_locations(struct gl_shader_program *prog,
1463 struct gl_context *ctx)
1464 {
1465 ralloc_free(prog->data->UniformStorage);
1466 prog->data->UniformStorage = NULL;
1467 prog->data->NumUniformStorage = 0;
1468
1469 if (prog->UniformHash != NULL) {
1470 prog->UniformHash->clear();
1471 } else {
1472 prog->UniformHash = new string_to_uint_map;
1473 }
1474
1475 /* First pass: Count the uniform resources used by the user-defined
1476 * uniforms. While this happens, each active uniform will have an index
1477 * assigned to it.
1478 *
1479 * Note: this is *NOT* the index that is returned to the application by
1480 * glGetUniformLocation.
1481 */
1482 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1483 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms,
1484 ctx->Const.UseSTD430AsDefaultPacking);
1485 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1486 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1487
1488 if (sh == NULL)
1489 continue;
1490
1491 link_update_uniform_buffer_variables(sh, i);
1492
1493 /* Reset various per-shader target counts.
1494 */
1495 uniform_size.start_shader();
1496
1497 foreach_in_list(ir_instruction, node, sh->ir) {
1498 ir_variable *const var = node->as_variable();
1499
1500 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1501 var->data.mode != ir_var_shader_storage))
1502 continue;
1503
1504 uniform_size.process(var);
1505 }
1506
1507 sh->Program->info.num_textures = uniform_size.num_shader_samplers;
1508 sh->Program->info.num_images = uniform_size.num_shader_images;
1509 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1510 sh->num_combined_uniform_components = sh->num_uniform_components;
1511
1512 for (unsigned i = 0; i < sh->Program->info.num_ubos; i++) {
1513 sh->num_combined_uniform_components +=
1514 sh->Program->sh.UniformBlocks[i]->UniformBufferSize / 4;
1515 }
1516 }
1517
1518 prog->data->NumUniformStorage = uniform_size.num_active_uniforms;
1519 prog->data->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
1520
1521 /* assign hidden uniforms a slot id */
1522 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1523 delete hiddenUniforms;
1524
1525 link_assign_uniform_storage(ctx, prog, uniform_size.num_values);
1526 }