1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "ir.h"
25 #include "linker.h"
26 #include "ir_uniform.h"
27 #include "glsl_symbol_table.h"
28 #include "program.h"
29 #include "string_to_uint_map.h"
30 #include "ir_array_refcount.h"
31 #include "main/mtypes.h"
32
33 /**
34 * \file link_uniforms.cpp
35 * Assign locations for GLSL uniforms.
36 *
37 * \author Ian Romanick <ian.d.romanick@intel.com>
38 */
39
40 /**
41  * Used by the linker to indicate uniforms that have no location set.
42 */
43 #define UNMAPPED_UNIFORM_LOC ~0u
44
45 void
46 program_resource_visitor::process(const glsl_type *type, const char *name,
47 bool use_std430_as_default)
48 {
49 assert(type->without_array()->is_struct()
50 || type->without_array()->is_interface());
51
52 unsigned record_array_count = 1;
53 char *name_copy = ralloc_strdup(NULL, name);
54
55 enum glsl_interface_packing packing =
56 type->get_internal_ifc_packing(use_std430_as_default);
57
58 recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
59 record_array_count, NULL);
60 ralloc_free(name_copy);
61 }
62
63 void
64 program_resource_visitor::process(ir_variable *var, bool use_std430_as_default)
65 {
66 const glsl_type *t =
67 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
68 process(var, t, use_std430_as_default);
69 }
70
71 void
72 program_resource_visitor::process(ir_variable *var, const glsl_type *var_type,
73 bool use_std430_as_default)
74 {
75 unsigned record_array_count = 1;
76 const bool row_major =
77 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
78
79 enum glsl_interface_packing packing = var->get_interface_type() ?
80 var->get_interface_type()->
81 get_internal_ifc_packing(use_std430_as_default) :
82 var->type->get_internal_ifc_packing(use_std430_as_default);
83
84 const glsl_type *t = var_type;
85 const glsl_type *t_without_array = t->without_array();
86
87 /* false is always passed for the row_major parameter to the other
88 * processing functions because no information is available to do
89 * otherwise. See the warning in linker.h.
90 */
91 if (t_without_array->is_struct() ||
92 (t->is_array() && t->fields.array->is_array())) {
93 char *name = ralloc_strdup(NULL, var->name);
94 recursion(var->type, &name, strlen(name), row_major, NULL, packing,
95 false, record_array_count, NULL);
96 ralloc_free(name);
97 } else if (t_without_array->is_interface()) {
98 char *name = ralloc_strdup(NULL, t_without_array->name);
99 const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
100 &t_without_array->
101 fields.structure[t_without_array->field_index(var->name)] : NULL;
102
103 recursion(t, &name, strlen(name), row_major, NULL, packing,
104 false, record_array_count, ifc_member);
105 ralloc_free(name);
106 } else {
107 this->set_record_array_count(record_array_count);
108 this->visit_field(t, var->name, row_major, NULL, packing, false);
109 }
110 }
111
112 void
113 program_resource_visitor::recursion(const glsl_type *t, char **name,
114 size_t name_length, bool row_major,
115 const glsl_type *record_type,
116 const enum glsl_interface_packing packing,
117 bool last_field,
118 unsigned record_array_count,
119 const glsl_struct_field *named_ifc_member)
120 {
121 /* Records need to have each field processed individually.
122 *
123 * Arrays of records need to have each array element processed
124 * individually, then each field of the resulting array elements processed
125 * individually.
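 *
 * For illustration (hypothetical declaration, not from any particular
 * shader), a uniform such as
 *
 *    uniform struct { vec4 v; float f[3]; } s[2];
 *
 * results in visit_field() being called for the leaf names "s[0].v",
 * "s[0].f", "s[1].v" and "s[1].f".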
126 */
127 if (t->is_interface() && named_ifc_member) {
128 ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
129 named_ifc_member->name);
130 recursion(named_ifc_member->type, name, name_length, row_major, NULL,
131 packing, false, record_array_count, NULL);
132 } else if (t->is_struct() || t->is_interface()) {
133 if (record_type == NULL && t->is_struct())
134 record_type = t;
135
136 if (t->is_struct())
137 this->enter_record(t, *name, row_major, packing);
138
139 for (unsigned i = 0; i < t->length; i++) {
140 const char *field = t->fields.structure[i].name;
141 size_t new_length = name_length;
142
143 if (t->is_interface() && t->fields.structure[i].offset != -1)
144 this->set_buffer_offset(t->fields.structure[i].offset);
145
146 /* Append '.field' to the current variable name. */
147 if (name_length == 0) {
148 ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
149 } else {
150 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
151 }
152
153 /* The layout of structures at the top level of the block is set
154 * during parsing. For matrices contained in multiple levels of
155 * structures in the block, the inner structures have no layout.
156 * These cases must potentially inherit the layout from the outer
157 * levels.
158 */
159 bool field_row_major = row_major;
160 const enum glsl_matrix_layout matrix_layout =
161 glsl_matrix_layout(t->fields.structure[i].matrix_layout);
162 if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
163 field_row_major = true;
164 } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
165 field_row_major = false;
166 }
167
168 recursion(t->fields.structure[i].type, name, new_length,
169 field_row_major,
170 record_type,
171 packing,
172 (i + 1) == t->length, record_array_count, NULL);
173
174 /* Only the first leaf-field of the record gets called with the
175 * record type pointer.
176 */
177 record_type = NULL;
178 }
179
180 if (t->is_struct()) {
181 (*name)[name_length] = '\0';
182 this->leave_record(t, *name, row_major, packing);
183 }
184 } else if (t->without_array()->is_struct() ||
185 t->without_array()->is_interface() ||
186 (t->is_array() && t->fields.array->is_array())) {
187 if (record_type == NULL && t->fields.array->is_struct())
188 record_type = t->fields.array;
189
190 unsigned length = t->length;
191
192 /* Shader storage block unsized arrays: add subscript [0] to variable
193 * names.
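 *
 * For example (hypothetical SSBO member): given
 *    struct S { float f; };
 *    buffer B { S s[]; };
 * the unsized array is visited once, producing the leaf name "s[0].f".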
194 */
195 if (t->is_unsized_array())
196 length = 1;
197
198 record_array_count *= length;
199
200 for (unsigned i = 0; i < length; i++) {
201 size_t new_length = name_length;
202
203 /* Append the subscript to the current variable name */
204 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
205
206 recursion(t->fields.array, name, new_length, row_major,
207 record_type,
208 packing,
209 (i + 1) == t->length, record_array_count,
210 named_ifc_member);
211
212 /* Only the first leaf-field of the record gets called with the
213 * record type pointer.
214 */
215 record_type = NULL;
216 }
217 } else {
218 this->set_record_array_count(record_array_count);
219 this->visit_field(t, *name, row_major, record_type, packing, last_field);
220 }
221 }
222
223 void
224 program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
225 const enum glsl_interface_packing)
226 {
227 }
228
229 void
230 program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
231 const enum glsl_interface_packing)
232 {
233 }
234
235 void
236 program_resource_visitor::set_buffer_offset(unsigned)
237 {
238 }
239
240 void
241 program_resource_visitor::set_record_array_count(unsigned)
242 {
243 }
244
245 namespace {
246
247 /**
248 * Class to help calculate the storage requirements for a set of uniforms
249 *
250  * As uniforms are added to the active set, the number of active uniforms and
251 * the storage requirements for those uniforms are accumulated. The active
252 * uniforms are added to the hash table supplied to the constructor.
253 *
254 * If the same uniform is added multiple times (i.e., once for each shader
255  * target), it will only be accounted for once.
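 *
 * A rough usage sketch, mirroring the driving loop in
 * link_assign_uniform_locations() below (argument names abbreviated):
 *
 *    count_uniform_size uniform_size(uniform_map, hidden_map, use_std430);
 *    uniform_size.start_shader();    // once per shader stage
 *    uniform_size.process(var);      // for each uniform ir_variable
 *    // ...then read num_active_uniforms, num_values, etc.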
256 */
257 class count_uniform_size : public program_resource_visitor {
258 public:
259 count_uniform_size(struct string_to_uint_map *map,
260 struct string_to_uint_map *hidden_map,
261 bool use_std430_as_default)
262 : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
263 num_shader_samplers(0), num_shader_images(0),
264 num_shader_uniform_components(0), num_shader_subroutines(0),
265 is_buffer_block(false), is_shader_storage(false), map(map),
266 hidden_map(hidden_map), current_var(NULL),
267 use_std430_as_default(use_std430_as_default)
268 {
269 /* empty */
270 }
271
272 void start_shader()
273 {
274 this->num_shader_samplers = 0;
275 this->num_shader_images = 0;
276 this->num_shader_uniform_components = 0;
277 this->num_shader_subroutines = 0;
278 }
279
280 void process(ir_variable *var)
281 {
282 this->current_var = var;
283 this->is_buffer_block = var->is_in_buffer_block();
284 this->is_shader_storage = var->is_in_shader_storage_block();
285 if (var->is_interface_instance())
286 program_resource_visitor::process(var->get_interface_type(),
287 var->get_interface_type()->name,
288 use_std430_as_default);
289 else
290 program_resource_visitor::process(var, use_std430_as_default);
291 }
292
293 /**
294 * Total number of active uniforms counted
295 */
296 unsigned num_active_uniforms;
297
298 unsigned num_hidden_uniforms;
299
300 /**
301 * Number of data values required to back the storage for the active uniforms
302 */
303 unsigned num_values;
304
305 /**
306 * Number of samplers used
307 */
308 unsigned num_shader_samplers;
309
310 /**
311 * Number of images used
312 */
313 unsigned num_shader_images;
314
315 /**
316  * Number of uniform components used in the current shader
317 */
318 unsigned num_shader_uniform_components;
319
320 /**
321 * Number of subroutine uniforms used
322 */
323 unsigned num_shader_subroutines;
324
325 bool is_buffer_block;
326 bool is_shader_storage;
327
328 struct string_to_uint_map *map;
329
330 private:
331 virtual void visit_field(const glsl_type *type, const char *name,
332 bool /* row_major */,
333 const glsl_type * /* record_type */,
334 const enum glsl_interface_packing,
335 bool /* last_field */)
336 {
337 assert(!type->without_array()->is_struct());
338 assert(!type->without_array()->is_interface());
339 assert(!(type->is_array() && type->fields.array->is_array()));
340
341 /* Count the number of samplers regardless of whether the uniform is
342 * already in the hash table. The hash table prevents adding the same
343 * uniform for multiple shader targets, but in this case we want to
344 * count it for each shader target.
345 */
346 const unsigned values = type->component_slots();
347 if (type->contains_subroutine()) {
348 this->num_shader_subroutines += values;
349 } else if (type->contains_sampler() && !current_var->data.bindless) {
350 /* Samplers (bound or bindless) are counted as two components as
351 * specified by ARB_bindless_texture. */
352 this->num_shader_samplers += values / 2;
353 } else if (type->contains_image() && !current_var->data.bindless) {
354 /* Images (bound or bindless) are counted as two components as
355 * specified by ARB_bindless_texture. */
356 this->num_shader_images += values / 2;
357
358 /* As drivers are likely to represent image uniforms as
359 * scalar indices, count them against the limit of uniform
360 * components in the default block. The spec allows image
361 * uniforms to use up no more than one scalar slot.
362 */
363 if (!is_shader_storage)
364 this->num_shader_uniform_components += values;
365 } else {
366 /* Accumulate the total number of uniform slots used by this shader.
367 * Note that samplers do not count against this limit because they
368 * don't use any storage on current hardware.
369 */
370 if (!is_buffer_block)
371 this->num_shader_uniform_components += values;
372 }
373
374 /* If the uniform is already in the map, there's nothing more to do.
375 */
376 unsigned id;
377 if (this->map->get(id, name))
378 return;
379
380 if (this->current_var->data.how_declared == ir_var_hidden) {
381 this->hidden_map->put(this->num_hidden_uniforms, name);
382 this->num_hidden_uniforms++;
383 } else {
384 this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
385 name);
386 }
387
388 /* Each leaf uniform occupies one entry in the list of active
389 * uniforms.
390 */
391 this->num_active_uniforms++;
392
393       if (!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
394 this->num_values += values;
395 }
396
397 struct string_to_uint_map *hidden_map;
398
399 /**
400 * Current variable being processed.
401 */
402 ir_variable *current_var;
403
404 bool use_std430_as_default;
405 };
406
407 } /* anonymous namespace */
408
409 unsigned
410 link_calculate_matrix_stride(const glsl_type *matrix, bool row_major,
411 enum glsl_interface_packing packing)
412 {
413 const unsigned N = matrix->is_double() ? 8 : 4;
414 const unsigned items =
415 row_major ? matrix->matrix_columns : matrix->vector_elements;
416
417 assert(items <= 4);
418
419    /* The matrix stride for std430 mat2xY matrices is not rounded up to
420 * vec4 size.
421 *
422 * Section 7.6.2.2 "Standard Uniform Block Layout" of the OpenGL 4.3 spec
423 * says:
424 *
425 * 2. If the member is a two- or four-component vector with components
426 * consuming N basic machine units, the base alignment is 2N or 4N,
427 * respectively.
428 * ...
429 * 4. If the member is an array of scalars or vectors, the base
430 * alignment and array stride are set to match the base alignment of
431 * a single array element, according to rules (1), (2), and (3), and
432 * rounded up to the base alignment of a vec4.
433 * ...
434 * 7. If the member is a row-major matrix with C columns and R rows, the
435 * matrix is stored identically to an array of R row vectors with C
436 * components each, according to rule (4).
437 * ...
438 *
439 * When using the std430 storage layout, shader storage blocks will be
440 * laid out in buffer storage identically to uniform and shader storage
441 * blocks using the std140 layout, except that the base alignment and
442 * stride of arrays of scalars and vectors in rule 4 and of structures
443     * in rule 9 are not rounded up to a multiple of the base alignment of a
444 * vec4.
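 *
 * For illustration, derived from the expression below: a float mat2
 * stored column-major has items == 2 and N == 4, so its stride is 8
 * bytes under std430 but glsl_align(8, 16) == 16 bytes under std140;
 * a float mat3 has items == 3, so both layouts use
 * glsl_align(12, 16) == 16 bytes.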
445 */
446 return packing == GLSL_INTERFACE_PACKING_STD430
447 ? (items < 3 ? items * N : glsl_align(items * N, 16))
448 : glsl_align(items * N, 16);
449 }
450
451 /**
452 * Class to help parcel out pieces of backing storage to uniforms
453 *
454 * Each uniform processed has some range of the \c gl_constant_value
455 * structures associated with it. The association is done by finding
456 * the uniform in the \c string_to_uint_map and using the value from
457 * the map to connect that slot in the \c gl_uniform_storage table
458 * with the next available slot in the \c gl_constant_value array.
459 *
460 * \warning
461 * This class assumes that every uniform that will be processed is
462 * already in the \c string_to_uint_map. In addition, it assumes that
463 * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
464 * enough."
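 *
 * A rough usage sketch, mirroring the driving loop in
 * link_assign_uniform_storage() below (argument names abbreviated):
 *
 *    parcel_out_uniform_storage parcel(prog, map, uniforms, values, std430);
 *    parcel.start_shader(stage);     // once per shader stage
 *    parcel.set_and_process(var);    // for each uniform ir_variable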
465 */
466 class parcel_out_uniform_storage : public program_resource_visitor {
467 public:
468 parcel_out_uniform_storage(struct gl_shader_program *prog,
469 struct string_to_uint_map *map,
470 struct gl_uniform_storage *uniforms,
471 union gl_constant_value *values,
472 bool use_std430_as_default)
473 : prog(prog), map(map), uniforms(uniforms),
474 use_std430_as_default(use_std430_as_default), values(values),
475 bindless_targets(NULL), bindless_access(NULL),
476 shader_storage_blocks_write_access(0)
477 {
478 }
479
480 virtual ~parcel_out_uniform_storage()
481 {
482 free(this->bindless_targets);
483 free(this->bindless_access);
484 }
485
486 void start_shader(gl_shader_stage shader_type)
487 {
488 assert(shader_type < MESA_SHADER_STAGES);
489 this->shader_type = shader_type;
490
491 this->shader_samplers_used = 0;
492 this->shader_shadow_samplers = 0;
493 this->next_sampler = 0;
494 this->next_image = 0;
495 this->next_subroutine = 0;
496 this->record_array_count = 1;
497 memset(this->targets, 0, sizeof(this->targets));
498
499 this->num_bindless_samplers = 0;
500 this->next_bindless_sampler = 0;
501 free(this->bindless_targets);
502 this->bindless_targets = NULL;
503
504 this->num_bindless_images = 0;
505 this->next_bindless_image = 0;
506 free(this->bindless_access);
507 this->bindless_access = NULL;
508 this->shader_storage_blocks_write_access = 0;
509 }
510
511 void set_and_process(ir_variable *var)
512 {
513 current_var = var;
514 field_counter = 0;
515 this->record_next_sampler = new string_to_uint_map;
516 this->record_next_bindless_sampler = new string_to_uint_map;
517 this->record_next_image = new string_to_uint_map;
518 this->record_next_bindless_image = new string_to_uint_map;
519
520 buffer_block_index = -1;
521 if (var->is_in_buffer_block()) {
522 struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
523 prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
524 unsigned num_blks = var->is_in_shader_storage_block() ?
525 prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
526 bool is_interface_array =
527 var->is_interface_instance() && var->type->is_array();
528
529 if (is_interface_array) {
530 unsigned l = strlen(var->get_interface_type()->name);
531
532 for (unsigned i = 0; i < num_blks; i++) {
533 if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
534 == 0 && blks[i].Name[l] == '[') {
535 buffer_block_index = i;
536 break;
537 }
538 }
539 } else {
540 for (unsigned i = 0; i < num_blks; i++) {
541 if (strcmp(var->get_interface_type()->name, blks[i].Name) == 0) {
542 buffer_block_index = i;
543 break;
544 }
545 }
546 }
547 assert(buffer_block_index != -1);
548
549 if (var->is_in_shader_storage_block() &&
550 !var->data.memory_read_only) {
551 unsigned array_size = is_interface_array ?
552 var->type->array_size() : 1;
553
554 STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);
555
556 /* Shaders that use too many SSBOs will fail to compile, which
557 * we don't care about.
558 *
559              * The condition below always holds for shaders that do not use
560              * too many SSBOs:
560 */
561 if (buffer_block_index + array_size <= 32) {
562 shader_storage_blocks_write_access |=
563 u_bit_consecutive(buffer_block_index, array_size);
564 }
565 }
566
567 /* Uniform blocks that were specified with an instance name must be
568 * handled a little bit differently. The name of the variable is the
569 * name used to reference the uniform block instead of being the name
570 * of a variable within the block. Therefore, searching for the name
571 * within the block will fail.
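 *
 * For example (hypothetical declaration): for
 *    uniform Foo { vec4 a; } foo;
 * the active uniform is named "Foo.a" while var->name is "foo", so the
 * walk below starts from the interface type and its name instead.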
572 */
573 if (var->is_interface_instance()) {
574 ubo_byte_offset = 0;
575 process(var->get_interface_type(),
576 var->get_interface_type()->name,
577 use_std430_as_default);
578 } else {
579 const struct gl_uniform_block *const block =
580 &blks[buffer_block_index];
581
582 assert(var->data.location != -1);
583
584 const struct gl_uniform_buffer_variable *const ubo_var =
585 &block->Uniforms[var->data.location];
586
587 ubo_byte_offset = ubo_var->Offset;
588 process(var, use_std430_as_default);
589 }
590 } else {
591 /* Store any explicit location and reset data location so we can
592 * reuse this variable for storing the uniform slot number.
593 */
594 this->explicit_location = current_var->data.location;
595 current_var->data.location = -1;
596
597 process(var, use_std430_as_default);
598 }
599 delete this->record_next_sampler;
600 delete this->record_next_bindless_sampler;
601 delete this->record_next_image;
602 delete this->record_next_bindless_image;
603 }
604
605 int buffer_block_index;
606 int ubo_byte_offset;
607 gl_shader_stage shader_type;
608
609 private:
610 bool set_opaque_indices(const glsl_type *base_type,
611 struct gl_uniform_storage *uniform,
612 const char *name, unsigned &next_index,
613 struct string_to_uint_map *record_next_index)
614 {
615 assert(base_type->is_sampler() || base_type->is_image());
616
617 if (this->record_array_count > 1) {
618 unsigned inner_array_size = MAX2(1, uniform->array_elements);
619 char *name_copy = ralloc_strdup(NULL, name);
620
621 /* Remove all array subscripts from the sampler/image name */
622 char *str_start;
623 const char *str_end;
624          while ((str_start = strchr(name_copy, '[')) &&
625 (str_end = strchr(name_copy, ']'))) {
626 memmove(str_start, str_end + 1, 1 + strlen(str_end + 1));
627 }
628
629 unsigned index = 0;
630 if (record_next_index->get(index, name_copy)) {
631 /* In this case, we've already seen this uniform so we just use the
632 * next sampler/image index recorded the last time we visited.
633 */
634 uniform->opaque[shader_type].index = index;
635 index = inner_array_size + uniform->opaque[shader_type].index;
636 record_next_index->put(index, name_copy);
637
638 ralloc_free(name_copy);
639 /* Return as everything else has already been initialised in a
640 * previous pass.
641 */
642 return false;
643 } else {
644 /* We've never seen this uniform before so we need to allocate
645 * enough indices to store it.
646 *
647 * Nested struct arrays behave like arrays of arrays so we need to
648 * increase the index by the total number of elements of the
649 * sampler/image in case there is more than one sampler/image
650 * inside the structs. This allows the offset to be easily
651 * calculated for indirect indexing.
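 *
 * A rough illustration (hypothetical declaration): for
 *    uniform struct { sampler2D tex[4]; } s[2];
 * the sampler leaf is visited once per struct-array element with
 * record_array_count == 2 and inner_array_size == 4, so the first
 * visit reserves indices [next_index, next_index + 8) and records
 * next_index + 4 as the starting index for the second element.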
652 */
653 uniform->opaque[shader_type].index = next_index;
654 next_index += inner_array_size * this->record_array_count;
655
656 /* Store the next index for future passes over the struct array
657 */
658 index = uniform->opaque[shader_type].index + inner_array_size;
659 record_next_index->put(index, name_copy);
660 ralloc_free(name_copy);
661 }
662 } else {
663 /* Increment the sampler/image by 1 for non-arrays and by the number
664 * of array elements for arrays.
665 */
666 uniform->opaque[shader_type].index = next_index;
667 next_index += MAX2(1, uniform->array_elements);
668 }
669 return true;
670 }
671
672 void handle_samplers(const glsl_type *base_type,
673 struct gl_uniform_storage *uniform, const char *name)
674 {
675 if (base_type->is_sampler()) {
676 uniform->opaque[shader_type].active = true;
677
678 const gl_texture_index target = base_type->sampler_index();
679 const unsigned shadow = base_type->sampler_shadow;
680
681 if (current_var->data.bindless) {
682 if (!set_opaque_indices(base_type, uniform, name,
683 this->next_bindless_sampler,
684 this->record_next_bindless_sampler))
685 return;
686
687 this->num_bindless_samplers = this->next_bindless_sampler;
688
689 this->bindless_targets = (gl_texture_index *)
690 realloc(this->bindless_targets,
691 this->num_bindless_samplers * sizeof(gl_texture_index));
692
693 for (unsigned i = uniform->opaque[shader_type].index;
694 i < this->num_bindless_samplers;
695 i++) {
696 this->bindless_targets[i] = target;
697 }
698 } else {
699 if (!set_opaque_indices(base_type, uniform, name,
700 this->next_sampler,
701 this->record_next_sampler))
702 return;
703
704 for (unsigned i = uniform->opaque[shader_type].index;
705 i < MIN2(this->next_sampler, MAX_SAMPLERS);
706 i++) {
707 this->targets[i] = target;
708 this->shader_samplers_used |= 1U << i;
709 this->shader_shadow_samplers |= shadow << i;
710 }
711 }
712 }
713 }
714
715 void handle_images(const glsl_type *base_type,
716 struct gl_uniform_storage *uniform, const char *name)
717 {
718 if (base_type->is_image()) {
719 uniform->opaque[shader_type].active = true;
720
721 /* Set image access qualifiers */
722 const GLenum access =
723 current_var->data.memory_read_only ?
724 (current_var->data.memory_write_only ? GL_NONE :
725 GL_READ_ONLY) :
726 (current_var->data.memory_write_only ? GL_WRITE_ONLY :
727 GL_READ_WRITE);
728
729 if (current_var->data.bindless) {
730 if (!set_opaque_indices(base_type, uniform, name,
731 this->next_bindless_image,
732 this->record_next_bindless_image))
733 return;
734
735 this->num_bindless_images = this->next_bindless_image;
736
737 this->bindless_access = (GLenum *)
738 realloc(this->bindless_access,
739 this->num_bindless_images * sizeof(GLenum));
740
741 for (unsigned i = uniform->opaque[shader_type].index;
742 i < this->num_bindless_images;
743 i++) {
744 this->bindless_access[i] = access;
745 }
746 } else {
747 if (!set_opaque_indices(base_type, uniform, name,
748 this->next_image,
749 this->record_next_image))
750 return;
751
752 for (unsigned i = uniform->opaque[shader_type].index;
753 i < MIN2(this->next_image, MAX_IMAGE_UNIFORMS);
754 i++) {
755 prog->_LinkedShaders[shader_type]->Program->sh.ImageAccess[i] = access;
756 }
757 }
758 }
759 }
760
761 void handle_subroutines(const glsl_type *base_type,
762 struct gl_uniform_storage *uniform)
763 {
764 if (base_type->is_subroutine()) {
765 uniform->opaque[shader_type].index = this->next_subroutine;
766 uniform->opaque[shader_type].active = true;
767
768 prog->_LinkedShaders[shader_type]->Program->sh.NumSubroutineUniforms++;
769
770 /* Increment the subroutine index by 1 for non-arrays and by the
771 * number of array elements for arrays.
772 */
773 this->next_subroutine += MAX2(1, uniform->array_elements);
774
775 }
776 }
777
778 virtual void set_buffer_offset(unsigned offset)
779 {
780 this->ubo_byte_offset = offset;
781 }
782
783 virtual void set_record_array_count(unsigned record_array_count)
784 {
785 this->record_array_count = record_array_count;
786 }
787
788 virtual void enter_record(const glsl_type *type, const char *,
789 bool row_major,
790 const enum glsl_interface_packing packing)
791 {
792 assert(type->is_struct());
793 if (this->buffer_block_index == -1)
794 return;
795 if (packing == GLSL_INTERFACE_PACKING_STD430)
796 this->ubo_byte_offset = glsl_align(
797 this->ubo_byte_offset, type->std430_base_alignment(row_major));
798 else
799 this->ubo_byte_offset = glsl_align(
800 this->ubo_byte_offset, type->std140_base_alignment(row_major));
801 }
802
803 virtual void leave_record(const glsl_type *type, const char *,
804 bool row_major,
805 const enum glsl_interface_packing packing)
806 {
807 assert(type->is_struct());
808 if (this->buffer_block_index == -1)
809 return;
810 if (packing == GLSL_INTERFACE_PACKING_STD430)
811 this->ubo_byte_offset = glsl_align(
812 this->ubo_byte_offset, type->std430_base_alignment(row_major));
813 else
814 this->ubo_byte_offset = glsl_align(
815 this->ubo_byte_offset, type->std140_base_alignment(row_major));
816 }
817
818 virtual void visit_field(const glsl_type *type, const char *name,
819 bool row_major, const glsl_type * /* record_type */,
820 const enum glsl_interface_packing packing,
821 bool /* last_field */)
822 {
823 assert(!type->without_array()->is_struct());
824 assert(!type->without_array()->is_interface());
825 assert(!(type->is_array() && type->fields.array->is_array()));
826
827 unsigned id;
828 bool found = this->map->get(id, name);
829 assert(found);
830
831 if (!found)
832 return;
833
834 const glsl_type *base_type;
835 if (type->is_array()) {
836 this->uniforms[id].array_elements = type->length;
837 base_type = type->fields.array;
838 } else {
839 this->uniforms[id].array_elements = 0;
840 base_type = type;
841 }
842
843 /* Initialise opaque data */
844 this->uniforms[id].opaque[shader_type].index = ~0;
845 this->uniforms[id].opaque[shader_type].active = false;
846
847 this->uniforms[id].active_shader_mask |= 1 << shader_type;
848
849 /* This assigns uniform indices to sampler and image uniforms. */
850 handle_samplers(base_type, &this->uniforms[id], name);
851 handle_images(base_type, &this->uniforms[id], name);
852 handle_subroutines(base_type, &this->uniforms[id]);
853
854       /* For arrays of arrays or struct arrays the base location may have
855        * already been set, so don't set it again.
856 */
857 if (buffer_block_index == -1 && current_var->data.location == -1) {
858 current_var->data.location = id;
859 }
860
861 /* If there is already storage associated with this uniform or if the
862 * uniform is set as builtin, it means that it was set while processing
863 * an earlier shader stage. For example, we may be processing the
864 * uniform in the fragment shader, but the uniform was already processed
865 * in the vertex shader.
866 */
867 if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
868 return;
869 }
870
871 /* Assign explicit locations. */
872 if (current_var->data.explicit_location) {
873 /* Set sequential locations for struct fields. */
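/* For example (hypothetical declaration): with
 *    layout(location = 2) uniform struct { float a; float b; } s;
 * "s.a" is remapped to location 2 and "s.b" to location 3 via
 * field_counter.
 */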
874 if (current_var->type->without_array()->is_struct() ||
875 current_var->type->is_array_of_arrays()) {
876 const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
877 this->uniforms[id].remap_location =
878 this->explicit_location + field_counter;
879 field_counter += entries;
880 } else {
881 this->uniforms[id].remap_location = this->explicit_location;
882 }
883 } else {
884          /* Initialize to indicate that no location is set */
885 this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
886 }
887
888 this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
889 this->uniforms[id].type = base_type;
890 this->uniforms[id].num_driver_storage = 0;
891 this->uniforms[id].driver_storage = NULL;
892 this->uniforms[id].atomic_buffer_index = -1;
893 this->uniforms[id].hidden =
894 current_var->data.how_declared == ir_var_hidden;
895 this->uniforms[id].builtin = is_gl_identifier(name);
896
897 this->uniforms[id].is_shader_storage =
898 current_var->is_in_shader_storage_block();
899 this->uniforms[id].is_bindless = current_var->data.bindless;
900
901 /* Do not assign storage if the uniform is a builtin or buffer object */
902 if (!this->uniforms[id].builtin &&
903 !this->uniforms[id].is_shader_storage &&
904 this->buffer_block_index == -1)
905 this->uniforms[id].storage = this->values;
906
907 if (this->buffer_block_index != -1) {
908 this->uniforms[id].block_index = this->buffer_block_index;
909
910 unsigned alignment = type->std140_base_alignment(row_major);
911 if (packing == GLSL_INTERFACE_PACKING_STD430)
912 alignment = type->std430_base_alignment(row_major);
913 this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
914 this->uniforms[id].offset = this->ubo_byte_offset;
915 if (packing == GLSL_INTERFACE_PACKING_STD430)
916 this->ubo_byte_offset += type->std430_size(row_major);
917 else
918 this->ubo_byte_offset += type->std140_size(row_major);
919
920 if (type->is_array()) {
921 if (packing == GLSL_INTERFACE_PACKING_STD430)
922 this->uniforms[id].array_stride =
923 type->without_array()->std430_array_stride(row_major);
924 else
925 this->uniforms[id].array_stride =
926 glsl_align(type->without_array()->std140_size(row_major),
927 16);
928 } else {
929 this->uniforms[id].array_stride = 0;
930 }
931
932 if (type->without_array()->is_matrix()) {
933 this->uniforms[id].matrix_stride =
934 link_calculate_matrix_stride(type->without_array(),
935 row_major,
936 packing);
937 this->uniforms[id].row_major = row_major;
938 } else {
939 this->uniforms[id].matrix_stride = 0;
940 this->uniforms[id].row_major = false;
941 }
942 } else {
943 this->uniforms[id].block_index = -1;
944 this->uniforms[id].offset = -1;
945 this->uniforms[id].array_stride = -1;
946 this->uniforms[id].matrix_stride = -1;
947 this->uniforms[id].row_major = false;
948 }
949
950 if (!this->uniforms[id].builtin &&
951 !this->uniforms[id].is_shader_storage &&
952 this->buffer_block_index == -1)
953 this->values += type->component_slots();
954 }
955
956 /**
957 * Current program being processed.
958 */
959 struct gl_shader_program *prog;
960
961 struct string_to_uint_map *map;
962
963 struct gl_uniform_storage *uniforms;
964 unsigned next_sampler;
965 unsigned next_bindless_sampler;
966 unsigned next_image;
967 unsigned next_bindless_image;
968 unsigned next_subroutine;
969
970 bool use_std430_as_default;
971
972 /**
973     * Field counter is used to ensure that uniform structures
974 * with explicit locations get sequential locations.
975 */
976 unsigned field_counter;
977
978 /**
979 * Current variable being processed.
980 */
981 ir_variable *current_var;
982
983 /* Used to store the explicit location from current_var so that we can
984 * reuse the location field for storing the uniform slot id.
985 */
986 int explicit_location;
987
988 /* Stores total struct array elements including nested structs */
989 unsigned record_array_count;
990
991 /* Map for temporarily storing next sampler index when handling samplers in
992 * struct arrays.
993 */
994 struct string_to_uint_map *record_next_sampler;
995
996    /* Map for temporarily storing next image index when handling images in
997 * struct arrays.
998 */
999 struct string_to_uint_map *record_next_image;
1000
1001 /* Map for temporarily storing next bindless sampler index when handling
1002 * bindless samplers in struct arrays.
1003 */
1004 struct string_to_uint_map *record_next_bindless_sampler;
1005
1006 /* Map for temporarily storing next bindless image index when handling
1007 * bindless images in struct arrays.
1008 */
1009 struct string_to_uint_map *record_next_bindless_image;
1010
1011 public:
1012 union gl_constant_value *values;
1013
1014 gl_texture_index targets[MAX_SAMPLERS];
1015
1016 /**
1017 * Mask of samplers used by the current shader stage.
1018 */
1019 unsigned shader_samplers_used;
1020
1021 /**
1022 * Mask of samplers used by the current shader stage for shadows.
1023 */
1024 unsigned shader_shadow_samplers;
1025
1026 /**
1027 * Number of bindless samplers used by the current shader stage.
1028 */
1029 unsigned num_bindless_samplers;
1030
1031 /**
1032 * Texture targets for bindless samplers used by the current stage.
1033 */
1034 gl_texture_index *bindless_targets;
1035
1036 /**
1037 * Number of bindless images used by the current shader stage.
1038 */
1039 unsigned num_bindless_images;
1040
1041 /**
1042 * Access types for bindless images used by the current stage.
1043 */
1044 GLenum *bindless_access;
1045
1046 /**
1047 * Bitmask of shader storage blocks not declared as read-only.
1048 */
1049 unsigned shader_storage_blocks_write_access;
1050 };
1051
1052 static bool
1053 variable_is_referenced(ir_array_refcount_visitor &v, ir_variable *var)
1054 {
1055 ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1056
1057 return entry->is_referenced;
1058
1059 }
1060
1061 /**
1062  * Walks the IR and updates the references to uniform blocks in the
1063  * ir_variables to point at the linked shader's list (previously, they
1064 * would point at the uniform block list in one of the pre-linked
1065 * shaders).
1066 */
1067 static void
1068 link_update_uniform_buffer_variables(struct gl_linked_shader *shader,
1069 unsigned stage)
1070 {
1071 ir_array_refcount_visitor v;
1072
1073 v.run(shader->ir);
1074
1075 foreach_in_list(ir_instruction, node, shader->ir) {
1076 ir_variable *const var = node->as_variable();
1077
1078 if (var == NULL || !var->is_in_buffer_block())
1079 continue;
1080
1081 assert(var->data.mode == ir_var_uniform ||
1082 var->data.mode == ir_var_shader_storage);
1083
1084 unsigned num_blocks = var->data.mode == ir_var_uniform ?
1085 shader->Program->info.num_ubos : shader->Program->info.num_ssbos;
1086 struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
1087 shader->Program->sh.UniformBlocks :
1088 shader->Program->sh.ShaderStorageBlocks;
1089
1090 if (var->is_interface_instance()) {
1091 const ir_array_refcount_entry *const entry = v.get_variable_entry(var);
1092
1093 if (entry->is_referenced) {
1094 /* Since this is an interface instance, the instance type will be
1095 * same as the array-stripped variable type. If the variable type
1096 * is an array, then the block names will be suffixed with [0]
1097 * through [n-1]. Unlike for non-interface instances, there will
1098 * not be structure types here, so the only name sentinel that we
1099 * have to worry about is [.
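 *
 * For example (hypothetical declaration): for
 *    uniform Stuff { vec4 v; } stuff[3];
 * the linked block names are "Stuff[0]" through "Stuff[2]", so the
 * comparison below stops at the '[' sentinel.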
1100 */
1101 assert(var->type->without_array() == var->get_interface_type());
1102 const char sentinel = var->type->is_array() ? '[' : '\0';
1103
1104 const ptrdiff_t len = strlen(var->get_interface_type()->name);
1105 for (unsigned i = 0; i < num_blocks; i++) {
1106 const char *const begin = blks[i]->Name;
1107 const char *const end = strchr(begin, sentinel);
1108
1109 if (end == NULL)
1110 continue;
1111
1112 if (len != (end - begin))
1113 continue;
1114
1115 /* Even when a match is found, do not "break" here. This could
1116 * be an array of instances, and all elements of the array need
1117 * to be marked as referenced.
1118 */
1119 if (strncmp(begin, var->get_interface_type()->name, len) == 0 &&
1120 (!var->type->is_array() ||
1121 entry->is_linearized_index_referenced(blks[i]->linearized_array_index))) {
1122 blks[i]->stageref |= 1U << stage;
1123 }
1124 }
1125 }
1126
1127 var->data.location = 0;
1128 continue;
1129 }
1130
1131 bool found = false;
1132 char sentinel = '\0';
1133
1134 if (var->type->is_struct()) {
1135 sentinel = '.';
1136 } else if (var->type->is_array() && (var->type->fields.array->is_array()
1137 || var->type->without_array()->is_struct())) {
1138 sentinel = '[';
1139 }
1140
1141 const unsigned l = strlen(var->name);
1142 for (unsigned i = 0; i < num_blocks; i++) {
1143 for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
1144 if (sentinel) {
1145 const char *begin = blks[i]->Uniforms[j].Name;
1146 const char *end = strchr(begin, sentinel);
1147
1148 if (end == NULL)
1149 continue;
1150
1151 if ((ptrdiff_t) l != (end - begin))
1152 continue;
1153
1154 found = strncmp(var->name, begin, l) == 0;
1155 } else {
1156 found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
1157 }
1158
1159 if (found) {
1160 var->data.location = j;
1161
1162 if (variable_is_referenced(v, var))
1163 blks[i]->stageref |= 1U << stage;
1164
1165 break;
1166 }
1167 }
1168
1169 if (found)
1170 break;
1171 }
1172 assert(found);
1173 }
1174 }
1175
1176 /**
1177 * Combine the hidden uniform hash map with the uniform hash map so that the
1178  * hidden uniforms will be given indices at the end of the uniform storage
1179 * array.
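 *
 * For example, with 10 active uniforms of which 3 are hidden, the hidden
 * uniforms are remapped to slots 7, 8 and 9.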
1180 */
1181 static void
1182 assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
1183 void *closure)
1184 {
1185 count_uniform_size *uniform_size = (count_uniform_size *) closure;
1186 unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
1187 uniform_size->num_hidden_uniforms;
1188
1189 uniform_size->map->put(hidden_uniform_start + hidden_id, name);
1190 }
1191
1192 static void
1193 link_setup_uniform_remap_tables(struct gl_context *ctx,
1194 struct gl_shader_program *prog)
1195 {
1196 unsigned total_entries = prog->NumExplicitUniformLocations;
1197 unsigned empty_locs = prog->NumUniformRemapTable - total_entries;
1198
1199 /* Reserve all the explicit locations of the active uniforms. */
1200 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1201 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1202 prog->data->UniformStorage[i].is_shader_storage)
1203 continue;
1204
1205 if (prog->data->UniformStorage[i].remap_location !=
1206 UNMAPPED_UNIFORM_LOC) {
1207 /* How many new entries for this uniform? */
1208 const unsigned entries =
1209 MAX2(1, prog->data->UniformStorage[i].array_elements);
1210
1211          /* Set remap table entries to point to the correct gl_uniform_storage. */
1212 for (unsigned j = 0; j < entries; j++) {
1213 unsigned element_loc =
1214 prog->data->UniformStorage[i].remap_location + j;
1215 assert(prog->UniformRemapTable[element_loc] ==
1216 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1217 prog->UniformRemapTable[element_loc] =
1218 &prog->data->UniformStorage[i];
1219 }
1220 }
1221 }
1222
1223 /* Reserve locations for rest of the uniforms. */
1224 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1225
1226 if (prog->data->UniformStorage[i].type->is_subroutine() ||
1227 prog->data->UniformStorage[i].is_shader_storage)
1228 continue;
1229
1230 /* Built-in uniforms should not get any location. */
1231 if (prog->data->UniformStorage[i].builtin)
1232 continue;
1233
1234 /* Explicit ones have been set already. */
1235 if (prog->data->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
1236 continue;
1237
1238       /* How many new entries for this uniform? */
1239 const unsigned entries =
1240 MAX2(1, prog->data->UniformStorage[i].array_elements);
1241
1242       /* Find an empty block in the UniformRemapTable where we can fit this uniform. */
1243 int chosen_location = -1;
1244
1245 if (empty_locs)
1246 chosen_location = link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);
1247
1248       /* Add new entries to the total amount for checking against
1249        * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform block (-1),
1250 * because locations of uniform block entries are not assignable.
1251 */
1252 if (prog->data->UniformStorage[i].block_index == -1)
1253 total_entries += entries;
1254
1255 if (chosen_location != -1) {
1256 empty_locs -= entries;
1257 } else {
1258 chosen_location = prog->NumUniformRemapTable;
1259
1260 /* resize remap table to fit new entries */
1261 prog->UniformRemapTable =
1262 reralloc(prog,
1263 prog->UniformRemapTable,
1264 gl_uniform_storage *,
1265 prog->NumUniformRemapTable + entries);
1266 prog->NumUniformRemapTable += entries;
1267 }
1268
1269 /* set pointers for this uniform */
1270 for (unsigned j = 0; j < entries; j++)
1271 prog->UniformRemapTable[chosen_location + j] =
1272 &prog->data->UniformStorage[i];
1273
1274 /* set the base location in remap table for the uniform */
1275 prog->data->UniformStorage[i].remap_location = chosen_location;
1276 }
1277
1278 /* Verify that total amount of entries for explicit and implicit locations
1279 * is less than MAX_UNIFORM_LOCATIONS.
1280 */
1281
1282 if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
1283 linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
1284 "(%u > %u)", total_entries,
1285 ctx->Const.MaxUserAssignableUniformLocations);
1286 }
1287
1288 /* Reserve all the explicit locations of the active subroutine uniforms. */
1289 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1290 if (!prog->data->UniformStorage[i].type->is_subroutine())
1291 continue;
1292
1293 if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
1294 continue;
1295
1296 /* How many new entries for this uniform? */
1297 const unsigned entries =
1298 MAX2(1, prog->data->UniformStorage[i].array_elements);
1299
1300 unsigned mask = prog->data->linked_stages;
1301 while (mask) {
1302 const int j = u_bit_scan(&mask);
1303 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1304
1305 if (!prog->data->UniformStorage[i].opaque[j].active)
1306 continue;
1307
1308          /* Set remap table entries to point to the correct gl_uniform_storage. */
1309 for (unsigned k = 0; k < entries; k++) {
1310 unsigned element_loc =
1311 prog->data->UniformStorage[i].remap_location + k;
1312 assert(p->sh.SubroutineUniformRemapTable[element_loc] ==
1313 INACTIVE_UNIFORM_EXPLICIT_LOCATION);
1314 p->sh.SubroutineUniformRemapTable[element_loc] =
1315 &prog->data->UniformStorage[i];
1316 }
1317 }
1318 }
1319
1320 /* reserve subroutine locations */
1321 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1322 if (!prog->data->UniformStorage[i].type->is_subroutine())
1323 continue;
1324
1325 if (prog->data->UniformStorage[i].remap_location !=
1326 UNMAPPED_UNIFORM_LOC)
1327 continue;
1328
1329 const unsigned entries =
1330 MAX2(1, prog->data->UniformStorage[i].array_elements);
1331
1332 unsigned mask = prog->data->linked_stages;
1333 while (mask) {
1334 const int j = u_bit_scan(&mask);
1335 struct gl_program *p = prog->_LinkedShaders[j]->Program;
1336
1337 if (!prog->data->UniformStorage[i].opaque[j].active)
1338 continue;
1339
1340 p->sh.SubroutineUniformRemapTable =
1341 reralloc(p,
1342 p->sh.SubroutineUniformRemapTable,
1343 gl_uniform_storage *,
1344 p->sh.NumSubroutineUniformRemapTable + entries);
1345
1346 for (unsigned k = 0; k < entries; k++) {
1347 p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
1348 &prog->data->UniformStorage[i];
1349 }
1350 prog->data->UniformStorage[i].remap_location =
1351 p->sh.NumSubroutineUniformRemapTable;
1352 p->sh.NumSubroutineUniformRemapTable += entries;
1353 }
1354 }
1355 }
1356
1357 static void
1358 link_assign_uniform_storage(struct gl_context *ctx,
1359 struct gl_shader_program *prog,
1360 const unsigned num_data_slots)
1361 {
1362 /* On the outside chance that there were no uniforms, bail out.
1363 */
1364 if (prog->data->NumUniformStorage == 0)
1365 return;
1366
1367 unsigned int boolean_true = ctx->Const.UniformBooleanTrue;
1368
1369 union gl_constant_value *data;
1370 if (prog->data->UniformStorage == NULL) {
1371 prog->data->UniformStorage = rzalloc_array(prog->data,
1372 struct gl_uniform_storage,
1373 prog->data->NumUniformStorage);
1374 data = rzalloc_array(prog->data->UniformStorage,
1375 union gl_constant_value, num_data_slots);
1376 prog->data->UniformDataDefaults =
1377 rzalloc_array(prog->data->UniformStorage,
1378 union gl_constant_value, num_data_slots);
1379 } else {
1380 data = prog->data->UniformDataSlots;
1381 }
1382
1383 #ifndef NDEBUG
1384 union gl_constant_value *data_end = &data[num_data_slots];
1385 #endif
1386
1387 parcel_out_uniform_storage parcel(prog, prog->UniformHash,
1388 prog->data->UniformStorage, data,
1389 ctx->Const.UseSTD430AsDefaultPacking);
1390
1391 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1392 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
1393
1394 if (!shader)
1395 continue;
1396
1397 parcel.start_shader((gl_shader_stage)i);
1398
1399 foreach_in_list(ir_instruction, node, shader->ir) {
1400 ir_variable *const var = node->as_variable();
1401
1402 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1403 var->data.mode != ir_var_shader_storage))
1404 continue;
1405
1406 parcel.set_and_process(var);
1407 }
1408
1409 shader->Program->SamplersUsed = parcel.shader_samplers_used;
1410 shader->shadow_samplers = parcel.shader_shadow_samplers;
1411 shader->Program->sh.ShaderStorageBlocksWriteAccess =
1412 parcel.shader_storage_blocks_write_access;
1413
1414 if (parcel.num_bindless_samplers > 0) {
1415 shader->Program->sh.NumBindlessSamplers = parcel.num_bindless_samplers;
1416 shader->Program->sh.BindlessSamplers =
1417 rzalloc_array(shader->Program, gl_bindless_sampler,
1418 parcel.num_bindless_samplers);
1419 for (unsigned j = 0; j < parcel.num_bindless_samplers; j++) {
1420 shader->Program->sh.BindlessSamplers[j].target =
1421 parcel.bindless_targets[j];
1422 }
1423 }
1424
1425 if (parcel.num_bindless_images > 0) {
1426 shader->Program->sh.NumBindlessImages = parcel.num_bindless_images;
1427 shader->Program->sh.BindlessImages =
1428 rzalloc_array(shader->Program, gl_bindless_image,
1429 parcel.num_bindless_images);
1430 for (unsigned j = 0; j < parcel.num_bindless_images; j++) {
1431 shader->Program->sh.BindlessImages[j].access =
1432 parcel.bindless_access[j];
1433 }
1434 }
1435
1436 STATIC_ASSERT(ARRAY_SIZE(shader->Program->sh.SamplerTargets) ==
1437 ARRAY_SIZE(parcel.targets));
1438 for (unsigned j = 0; j < ARRAY_SIZE(parcel.targets); j++)
1439 shader->Program->sh.SamplerTargets[j] = parcel.targets[j];
1440 }
1441
1442 #ifndef NDEBUG
1443 for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1444 assert(prog->data->UniformStorage[i].storage != NULL ||
1445 prog->data->UniformStorage[i].builtin ||
1446 prog->data->UniformStorage[i].is_shader_storage ||
1447 prog->data->UniformStorage[i].block_index != -1);
1448 }
1449
1450 assert(parcel.values == data_end);
1451 #endif
1452
1453 link_setup_uniform_remap_tables(ctx, prog);
1454
1455 /* Set shader cache fields */
1456 prog->data->NumUniformDataSlots = num_data_slots;
1457 prog->data->UniformDataSlots = data;
1458
1459 link_set_uniform_initializers(prog, boolean_true);
1460 }
1461
1462 void
1463 link_assign_uniform_locations(struct gl_shader_program *prog,
1464 struct gl_context *ctx)
1465 {
1466 ralloc_free(prog->data->UniformStorage);
1467 prog->data->UniformStorage = NULL;
1468 prog->data->NumUniformStorage = 0;
1469
1470 if (prog->UniformHash != NULL) {
1471 prog->UniformHash->clear();
1472 } else {
1473 prog->UniformHash = new string_to_uint_map;
1474 }
1475
1476 /* First pass: Count the uniform resources used by the user-defined
1477 * uniforms. While this happens, each active uniform will have an index
1478 * assigned to it.
1479 *
1480 * Note: this is *NOT* the index that is returned to the application by
1481 * glGetUniformLocation.
1482 */
1483 struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
1484 count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms,
1485 ctx->Const.UseSTD430AsDefaultPacking);
1486 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1487 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1488
1489 if (sh == NULL)
1490 continue;
1491
1492 link_update_uniform_buffer_variables(sh, i);
1493
1494 /* Reset various per-shader target counts.
1495 */
1496 uniform_size.start_shader();
1497
1498 foreach_in_list(ir_instruction, node, sh->ir) {
1499 ir_variable *const var = node->as_variable();
1500
1501 if ((var == NULL) || (var->data.mode != ir_var_uniform &&
1502 var->data.mode != ir_var_shader_storage))
1503 continue;
1504
1505 uniform_size.process(var);
1506 }
1507
1508 if (uniform_size.num_shader_samplers >
1509 ctx->Const.Program[i].MaxTextureImageUnits) {
1510 linker_error(prog, "Too many %s shader texture samplers\n",
1511 _mesa_shader_stage_to_string(i));
1512 continue;
1513 }
1514
1515 if (uniform_size.num_shader_images >
1516 ctx->Const.Program[i].MaxImageUniforms) {
1517 linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
1518 _mesa_shader_stage_to_string(i),
1519                       uniform_size.num_shader_images,
1520 ctx->Const.Program[i].MaxImageUniforms);
1521 continue;
1522 }
1523
1524 sh->Program->info.num_textures = uniform_size.num_shader_samplers;
1525 sh->Program->info.num_images = uniform_size.num_shader_images;
1526 sh->num_uniform_components = uniform_size.num_shader_uniform_components;
1527 sh->num_combined_uniform_components = sh->num_uniform_components;
1528
1529 for (unsigned i = 0; i < sh->Program->info.num_ubos; i++) {
1530 sh->num_combined_uniform_components +=
1531 sh->Program->sh.UniformBlocks[i]->UniformBufferSize / 4;
1532 }
1533 }
1534
1535 if (prog->data->LinkStatus == LINKING_FAILURE) {
1536 delete hiddenUniforms;
1537 return;
1538 }
1539
1540 prog->data->NumUniformStorage = uniform_size.num_active_uniforms;
1541 prog->data->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
1542
1543 /* assign hidden uniforms a slot id */
1544 hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
1545 delete hiddenUniforms;
1546
1547 link_assign_uniform_storage(ctx, prog, uniform_size.num_values);
1548 }