spirv: Export vtn_storage_class_to_mode()
[mesa.git] / src / compiler / spirv / vtn_variables.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "spirv_info.h"
30 #include "nir_deref.h"
31 #include <vulkan/vulkan_core.h>
32
33 static struct vtn_access_chain *
34 vtn_access_chain_create(struct vtn_builder *b, unsigned length)
35 {
36 struct vtn_access_chain *chain;
37
38 /* Subtract 1 from the length since there's already one built in */
39 size_t size = sizeof(*chain) +
40 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
41 chain = rzalloc_size(b, size);
42 chain->length = length;
43
44 return chain;
45 }
46
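/* Returns true if this pointer is accessed as an explicit index/offset pair:
 * UBO/SSBO access lowered to offsets, push constants, or workgroup storage
 * when it is lowered to offsets.
 */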
47 bool
48 vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
49 struct vtn_pointer *ptr)
50 {
51 return ((ptr->mode == vtn_variable_mode_ubo ||
52 ptr->mode == vtn_variable_mode_ssbo) &&
53 b->options->lower_ubo_ssbo_access_to_offsets) ||
54 ptr->mode == vtn_variable_mode_push_constant ||
55 (ptr->mode == vtn_variable_mode_workgroup &&
56 b->options->lower_workgroup_access_to_offsets);
57 }
58
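/* Returns true if the pointer refers to storage outside the shader itself:
 * UBOs, SSBOs (including physical storage buffers), push constants, or
 * workgroup memory when it is lowered to offsets.
 */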
59 static bool
60 vtn_pointer_is_external_block(struct vtn_builder *b,
61 struct vtn_pointer *ptr)
62 {
63 return ptr->mode == vtn_variable_mode_ssbo ||
64 ptr->mode == vtn_variable_mode_ubo ||
65 ptr->mode == vtn_variable_mode_phys_ssbo ||
66 ptr->mode == vtn_variable_mode_push_constant ||
67 (ptr->mode == vtn_variable_mode_workgroup &&
68 b->options->lower_workgroup_access_to_offsets);
69 }
70
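/* Converts one access-chain link into an SSA offset of the requested bit
 * size, scaled by the given stride. Literal links become immediates; SSA
 * links are bit-size-converted and multiplied by the stride.
 */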
71 static nir_ssa_def *
72 vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
73 unsigned stride, unsigned bit_size)
74 {
75 vtn_assert(stride > 0);
76 if (link.mode == vtn_access_mode_literal) {
77 return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
78 } else {
79 nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
80 if (ssa->bit_size != bit_size)
81 ssa = nir_i2i(&b->nb, ssa, bit_size);
82 return nir_imul_imm(&b->nb, ssa, stride);
83 }
84 }
85
86 static VkDescriptorType
87 vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
88 {
89 switch (mode) {
90 case vtn_variable_mode_ubo:
91 return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
92 case vtn_variable_mode_ssbo:
93 return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
94 default:
95 vtn_fail("Invalid mode for vulkan_resource_index");
96 }
97 }
98
99 static const struct glsl_type *
100 vtn_ptr_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
101 {
102 nir_address_format addr_format;
103 switch (mode) {
104 case vtn_variable_mode_ubo:
105 addr_format = b->options->ubo_addr_format;
106 break;
107 case vtn_variable_mode_ssbo:
108 addr_format = b->options->ssbo_addr_format;
109 break;
110 default:
111 vtn_fail("Invalid mode for vulkan_resource_index");
112 }
113 return nir_address_format_to_glsl_type(addr_format);
114 }
115
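/* Emits a vulkan_resource_index intrinsic for the variable's descriptor set
 * and binding. A NULL desc_array_index means the resource is not arrayed and
 * index 0 is used.
 */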
116 static nir_ssa_def *
117 vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
118 nir_ssa_def *desc_array_index)
119 {
120 if (!desc_array_index) {
121 vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
122 desc_array_index = nir_imm_int(&b->nb, 0);
123 }
124
125 nir_intrinsic_instr *instr =
126 nir_intrinsic_instr_create(b->nb.shader,
127 nir_intrinsic_vulkan_resource_index);
128 instr->src[0] = nir_src_for_ssa(desc_array_index);
129 nir_intrinsic_set_desc_set(instr, var->descriptor_set);
130 nir_intrinsic_set_binding(instr, var->binding);
131 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
132
133 const struct glsl_type *index_type =
134 b->options->lower_ubo_ssbo_access_to_offsets ?
135 glsl_uint_type() : vtn_ptr_type_for_mode(b, var->mode);
136
137 instr->num_components = glsl_get_vector_elements(index_type);
138 nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
139 glsl_get_bit_size(index_type), NULL);
140 nir_builder_instr_insert(&b->nb, &instr->instr);
141
142 return &instr->dest.ssa;
143 }
144
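/* Emits a vulkan_resource_reindex intrinsic that adds offset_index to an
 * already-computed resource index, for walking further into an array of
 * UBOs/SSBOs.
 */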
145 static nir_ssa_def *
146 vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
147 nir_ssa_def *base_index, nir_ssa_def *offset_index)
148 {
149 nir_intrinsic_instr *instr =
150 nir_intrinsic_instr_create(b->nb.shader,
151 nir_intrinsic_vulkan_resource_reindex);
152 instr->src[0] = nir_src_for_ssa(base_index);
153 instr->src[1] = nir_src_for_ssa(offset_index);
154 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
155
156 const struct glsl_type *index_type =
157 b->options->lower_ubo_ssbo_access_to_offsets ?
158 glsl_uint_type() : vtn_ptr_type_for_mode(b, mode);
159
160 instr->num_components = glsl_get_vector_elements(index_type);
161 nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
162 glsl_get_bit_size(index_type), NULL);
163 nir_builder_instr_insert(&b->nb, &instr->instr);
164
165 return &instr->dest.ssa;
166 }
167
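/* Emits a load_vulkan_descriptor intrinsic to turn a resource index into the
 * descriptor value that the deref chain is then built on.
 */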
168 static nir_ssa_def *
169 vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
170 nir_ssa_def *desc_index)
171 {
172 nir_intrinsic_instr *desc_load =
173 nir_intrinsic_instr_create(b->nb.shader,
174 nir_intrinsic_load_vulkan_descriptor);
175 desc_load->src[0] = nir_src_for_ssa(desc_index);
176 nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
177
178 const struct glsl_type *ptr_type = vtn_ptr_type_for_mode(b, mode);
179
180 desc_load->num_components = glsl_get_vector_elements(ptr_type);
181 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
182 desc_load->num_components,
183 glsl_get_bit_size(ptr_type), NULL);
184 nir_builder_instr_insert(&b->nb, &desc_load->instr);
185
186 return &desc_load->dest.ssa;
187 }
188
189 /* Dereference the given base pointer by the access chain */
190 static struct vtn_pointer *
191 vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
192 struct vtn_pointer *base,
193 struct vtn_access_chain *deref_chain)
194 {
195 struct vtn_type *type = base->type;
196 enum gl_access_qualifier access = base->access;
197 unsigned idx = 0;
198
199 nir_deref_instr *tail;
200 if (base->deref) {
201 tail = base->deref;
202 } else if (vtn_pointer_is_external_block(b, base)) {
203 nir_ssa_def *block_index = base->block_index;
204
205 /* We're dereferencing an external block pointer. Correctness of this
206 * operation relies on one particular line in the SPIR-V spec, section
207 * entitled "Validation Rules for Shader Capabilities":
208 *
209 * "Block and BufferBlock decorations cannot decorate a structure
210 * type that is nested at any level inside another structure type
211 * decorated with Block or BufferBlock."
212 *
213 * This means that we can detect the point where we cross over from
214 * descriptor indexing to buffer indexing by looking for the block
215 * decorated struct type. Anything before the block decorated struct
216 * type is a descriptor indexing operation and anything after the block
217 * decorated struct is a buffer offset operation.
218 */
219
220 /* Figure out the descriptor array index if any
221 *
222 * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
223 * to forget the Block or BufferBlock decoration from time to time.
224 * It's more robust if we check for both !block_index and for the type
225 * to contain a block. This way there's a decent chance that arrays of
226 * UBOs/SSBOs will work correctly even if variable pointers are
227 * completely toast.
228 */
229 nir_ssa_def *desc_arr_idx = NULL;
230 if (!block_index || vtn_type_contains_block(b, type)) {
231 /* If our type contains a block, then we're still outside the block
232 * and we need to process enough levels of dereferences to get inside
233 * of it.
234 */
235 if (deref_chain->ptr_as_array) {
236 unsigned aoa_size = glsl_get_aoa_size(type->type);
237 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
238 MAX2(aoa_size, 1), 32);
239 idx++;
240 }
241
242 for (; idx < deref_chain->length; idx++) {
243 if (type->base_type != vtn_base_type_array) {
244 vtn_assert(type->base_type == vtn_base_type_struct);
245 break;
246 }
247
248 unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
249 nir_ssa_def *arr_offset =
250 vtn_access_link_as_ssa(b, deref_chain->link[idx],
251 MAX2(aoa_size, 1), 32);
252 if (desc_arr_idx)
253 desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
254 else
255 desc_arr_idx = arr_offset;
256
257 type = type->array_element;
258 access |= type->access;
259 }
260 }
261
262 if (!block_index) {
263 vtn_assert(base->var && base->type);
264 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
265 } else if (desc_arr_idx) {
266 block_index = vtn_resource_reindex(b, base->mode,
267 block_index, desc_arr_idx);
268 }
269
270 if (idx == deref_chain->length) {
271 /* The entire deref was consumed in finding the block index. Return
272 * a pointer which just has a block index and a later access chain
273 * will dereference deeper.
274 */
275 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
276 ptr->mode = base->mode;
277 ptr->type = type;
278 ptr->block_index = block_index;
279 ptr->access = access;
280 return ptr;
281 }
282
283 /* If we got here, there's more access chain to handle and we have the
284 * final block index. Insert a descriptor load and cast to a deref to
285 * start the deref chain.
286 */
287 nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
288
289 assert(base->mode == vtn_variable_mode_ssbo ||
290 base->mode == vtn_variable_mode_ubo);
291 nir_variable_mode nir_mode =
292 base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
293
294 tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
295 base->ptr_type->stride);
296 } else {
297 assert(base->var && base->var->var);
298 tail = nir_build_deref_var(&b->nb, base->var->var);
299 if (base->ptr_type && base->ptr_type->type) {
300 tail->dest.ssa.num_components =
301 glsl_get_vector_elements(base->ptr_type->type);
302 tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
303 }
304 }
305
306 if (idx == 0 && deref_chain->ptr_as_array) {
307 /* We start with a deref cast to get the stride. Hopefully, we'll be
308 * able to delete that cast eventually.
309 */
310 tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
311 tail->type, base->ptr_type->stride);
312
313 nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
314 tail->dest.ssa.bit_size);
315 tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
316 idx++;
317 }
318
319 for (; idx < deref_chain->length; idx++) {
320 if (glsl_type_is_struct_or_ifc(type->type)) {
321 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
322 unsigned field = deref_chain->link[idx].id;
323 tail = nir_build_deref_struct(&b->nb, tail, field);
324 type = type->members[field];
325 } else {
326 nir_ssa_def *arr_index =
327 vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
328 tail->dest.ssa.bit_size);
329 tail = nir_build_deref_array(&b->nb, tail, arr_index);
330 type = type->array_element;
331 }
332
333 access |= type->access;
334 }
335
336 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
337 ptr->mode = base->mode;
338 ptr->type = type;
339 ptr->var = base->var;
340 ptr->deref = tail;
341 ptr->access = access;
342
343 return ptr;
344 }
345
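/* Dereferences the given base pointer by the access chain for the SSA-offset
 * path, producing a new pointer that carries an updated block index and byte
 * offset instead of a NIR deref.
 */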
346 static struct vtn_pointer *
347 vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
348 struct vtn_pointer *base,
349 struct vtn_access_chain *deref_chain)
350 {
351 nir_ssa_def *block_index = base->block_index;
352 nir_ssa_def *offset = base->offset;
353 struct vtn_type *type = base->type;
354 enum gl_access_qualifier access = base->access;
355
356 unsigned idx = 0;
357 if (base->mode == vtn_variable_mode_ubo ||
358 base->mode == vtn_variable_mode_ssbo) {
359 if (!block_index) {
360 vtn_assert(base->var && base->type);
361 nir_ssa_def *desc_arr_idx;
362 if (glsl_type_is_array(type->type)) {
363 if (deref_chain->length >= 1) {
364 desc_arr_idx =
365 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
366 idx++;
367 /* This consumes a level of type */
368 type = type->array_element;
369 access |= type->access;
370 } else {
371 /* This is annoying. We've been asked for a pointer to the
372 * array of UBOs/SSBOs and not a specific buffer. Return a
373 * pointer with a descriptor index of 0 and we'll have to do
374 * a reindex later to adjust it to the right thing.
375 */
376 desc_arr_idx = nir_imm_int(&b->nb, 0);
377 }
378 } else if (deref_chain->ptr_as_array) {
379 /* You can't have a zero-length OpPtrAccessChain */
380 vtn_assert(deref_chain->length >= 1);
381 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
382 } else {
383 /* We have a regular non-array SSBO. */
384 desc_arr_idx = NULL;
385 }
386 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
387 } else if (deref_chain->ptr_as_array &&
388 type->base_type == vtn_base_type_struct && type->block) {
389 /* We are doing an OpPtrAccessChain on a pointer to a struct that is
390 * decorated block. This is an interesting corner in the SPIR-V
391 * spec. One interpretation would be that the client is clearly
392 * trying to treat that block as if it's an implicit array of blocks
393 * repeated in the buffer. However, the SPIR-V spec for the
394 * OpPtrAccessChain says:
395 *
396 * "Base is treated as the address of the first element of an
397 * array, and the Element element’s address is computed to be the
398 * base for the Indexes, as per OpAccessChain."
399 *
400 * Taken literally, that would mean that your struct type is supposed
401 * to be treated as an array of such a struct and, since it's
402 * decorated block, that means an array of blocks which corresponds
403 * to an array descriptor. Therefore, we need to do a reindex
404 * operation to add the index from the first link in the access chain
405 * to the index we received.
406 *
407 * The downside to this interpretation (there always is one) is that
408 * this might be somewhat surprising behavior to apps if they expect
409 * the implicit array behavior described above.
410 */
411 vtn_assert(deref_chain->length >= 1);
412 nir_ssa_def *offset_index =
413 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
414 idx++;
415
416 block_index = vtn_resource_reindex(b, base->mode,
417 block_index, offset_index);
418 }
419 }
420
421 if (!offset) {
422 if (base->mode == vtn_variable_mode_workgroup) {
423 /* SLM neither needs nor has a block index */
424 vtn_assert(!block_index);
425
426 /* We need the variable for the base offset */
427 vtn_assert(base->var);
428
429 /* We need ptr_type for size and alignment */
430 vtn_assert(base->ptr_type);
431
432 /* Assign location on first use so that we don't end up bloating SLM
433 * address space for variables which are never statically used.
434 */
435 if (base->var->shared_location < 0) {
436 vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
437 b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
438 base->ptr_type->align);
439 base->var->shared_location = b->shader->num_shared;
440 b->shader->num_shared += base->ptr_type->length;
441 }
442
443 offset = nir_imm_int(&b->nb, base->var->shared_location);
444 } else if (base->mode == vtn_variable_mode_push_constant) {
445 /* Push constants neither need nor have a block index */
446 vtn_assert(!block_index);
447
448 /* Start off at the start of the push constant block. */
449 offset = nir_imm_int(&b->nb, 0);
450 } else {
451 /* The code above should have ensured a block_index when needed. */
452 vtn_assert(block_index);
453
454 /* Start off at the start of the buffer. */
455 offset = nir_imm_int(&b->nb, 0);
456 }
457 }
458
459 if (deref_chain->ptr_as_array && idx == 0) {
460 /* We need ptr_type for the stride */
461 vtn_assert(base->ptr_type);
462
463 /* We need at least one element in the chain */
464 vtn_assert(deref_chain->length >= 1);
465
466 nir_ssa_def *elem_offset =
467 vtn_access_link_as_ssa(b, deref_chain->link[idx],
468 base->ptr_type->stride, offset->bit_size);
469 offset = nir_iadd(&b->nb, offset, elem_offset);
470 idx++;
471 }
472
473 for (; idx < deref_chain->length; idx++) {
474 switch (glsl_get_base_type(type->type)) {
475 case GLSL_TYPE_UINT:
476 case GLSL_TYPE_INT:
477 case GLSL_TYPE_UINT16:
478 case GLSL_TYPE_INT16:
479 case GLSL_TYPE_UINT8:
480 case GLSL_TYPE_INT8:
481 case GLSL_TYPE_UINT64:
482 case GLSL_TYPE_INT64:
483 case GLSL_TYPE_FLOAT:
484 case GLSL_TYPE_FLOAT16:
485 case GLSL_TYPE_DOUBLE:
486 case GLSL_TYPE_BOOL:
487 case GLSL_TYPE_ARRAY: {
488 nir_ssa_def *elem_offset =
489 vtn_access_link_as_ssa(b, deref_chain->link[idx],
490 type->stride, offset->bit_size);
491 offset = nir_iadd(&b->nb, offset, elem_offset);
492 type = type->array_element;
493 access |= type->access;
494 break;
495 }
496
497 case GLSL_TYPE_INTERFACE:
498 case GLSL_TYPE_STRUCT: {
499 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
500 unsigned member = deref_chain->link[idx].id;
501 offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
502 type = type->members[member];
503 access |= type->access;
504 break;
505 }
506
507 default:
508 vtn_fail("Invalid type for deref");
509 }
510 }
511
512 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
513 ptr->mode = base->mode;
514 ptr->type = type;
515 ptr->block_index = block_index;
516 ptr->offset = offset;
517 ptr->access = access;
518
519 return ptr;
520 }
521
522 /* Dereference the given base pointer by the access chain */
523 static struct vtn_pointer *
524 vtn_pointer_dereference(struct vtn_builder *b,
525 struct vtn_pointer *base,
526 struct vtn_access_chain *deref_chain)
527 {
528 if (vtn_pointer_uses_ssa_offset(b, base)) {
529 return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
530 } else {
531 return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
532 }
533 }
534
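/* Wraps a vtn_variable in a fresh vtn_pointer of the given pointer type,
 * inheriting the variable's mode and access qualifiers.
 */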
535 struct vtn_pointer *
536 vtn_pointer_for_variable(struct vtn_builder *b,
537 struct vtn_variable *var, struct vtn_type *ptr_type)
538 {
539 struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);
540
541 pointer->mode = var->mode;
542 pointer->type = var->type;
543 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
544 vtn_assert(ptr_type->deref->type == var->type->type);
545 pointer->ptr_type = ptr_type;
546 pointer->var = var;
547 pointer->access = var->access | var->type->access;
548
549 return pointer;
550 }
551
552 /* Returns an atomic_uint type based on the original uint type. The returned
553 * type will be equivalent to the original one but will have an atomic_uint
554 * type as its leaf instead of a uint.
555 *
556 * Handles uint scalars, arrays, and arrays of arrays of any nesting depth.
557 */
558 static const struct glsl_type *
559 repair_atomic_type(const struct glsl_type *type)
560 {
561 assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
562 assert(glsl_type_is_scalar(glsl_without_array(type)));
563
564 if (glsl_type_is_array(type)) {
565 const struct glsl_type *atomic =
566 repair_atomic_type(glsl_get_array_element(type));
567
568 return glsl_array_type(atomic, glsl_get_length(type),
569 glsl_get_explicit_stride(type));
570 } else {
571 return glsl_atomic_uint_type();
572 }
573 }
574
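/* Returns the NIR deref for a deref-based pointer, building the deref chain
 * first if the pointer so far only refers to the variable itself.
 */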
575 nir_deref_instr *
576 vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
577 {
578 if (b->wa_glslang_179) {
579 /* Do on-the-fly copy propagation for samplers. */
580 if (ptr->var && ptr->var->copy_prop_sampler)
581 return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
582 }
583
584 vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
585 if (!ptr->deref) {
586 struct vtn_access_chain chain = {
587 .length = 0,
588 };
589 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
590 }
591
592 return ptr->deref;
593 }
594
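/* Recursively loads or stores a local value: vectors and scalars map to a
 * single load/store_deref while arrays, matrices, and structs recurse per
 * element.
 */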
595 static void
596 _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
597 struct vtn_ssa_value *inout,
598 enum gl_access_qualifier access)
599 {
600 if (glsl_type_is_vector_or_scalar(deref->type)) {
601 if (load) {
602 inout->def = nir_load_deref_with_access(&b->nb, deref, access);
603 } else {
604 nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
605 }
606 } else if (glsl_type_is_array(deref->type) ||
607 glsl_type_is_matrix(deref->type)) {
608 unsigned elems = glsl_get_length(deref->type);
609 for (unsigned i = 0; i < elems; i++) {
610 nir_deref_instr *child =
611 nir_build_deref_array_imm(&b->nb, deref, i);
612 _vtn_local_load_store(b, load, child, inout->elems[i], access);
613 }
614 } else {
615 vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
616 unsigned elems = glsl_get_length(deref->type);
617 for (unsigned i = 0; i < elems; i++) {
618 nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
619 _vtn_local_load_store(b, load, child, inout->elems[i], access);
620 }
621 }
622 }
623
624 nir_deref_instr *
625 vtn_nir_deref(struct vtn_builder *b, uint32_t id)
626 {
627 struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
628 return vtn_pointer_to_deref(b, ptr);
629 }
630
631 /*
632 * Gets the NIR-level deref tail, which may have as a child an array deref
633 * that selects a component, since OpAccessChain supports per-component
634 * indexing in SPIR-V.
635 */
636 static nir_deref_instr *
637 get_deref_tail(nir_deref_instr *deref)
638 {
639 if (deref->deref_type != nir_deref_type_array)
640 return deref;
641
642 nir_deref_instr *parent =
643 nir_instr_as_deref(deref->parent.ssa->parent_instr);
644
645 if (glsl_type_is_vector(parent->type))
646 return parent;
647 else
648 return deref;
649 }
650
651 struct vtn_ssa_value *
652 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
653 enum gl_access_qualifier access)
654 {
655 nir_deref_instr *src_tail = get_deref_tail(src);
656 struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
657 _vtn_local_load_store(b, true, src_tail, val, access);
658
659 if (src_tail != src) {
660 val->type = src->type;
661 if (nir_src_is_const(src->arr.index))
662 val->def = vtn_vector_extract(b, val->def,
663 nir_src_as_uint(src->arr.index));
664 else
665 val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
666 }
667
668 return val;
669 }
670
671 void
672 vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
673 nir_deref_instr *dest, enum gl_access_qualifier access)
674 {
675 nir_deref_instr *dest_tail = get_deref_tail(dest);
676
677 if (dest_tail != dest) {
678 struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
679 _vtn_local_load_store(b, true, dest_tail, val, access);
680
681 if (nir_src_is_const(dest->arr.index))
682 val->def = vtn_vector_insert(b, val->def, src->def,
683 nir_src_as_uint(dest->arr.index));
684 else
685 val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
686 dest->arr.index.ssa);
687 _vtn_local_load_store(b, false, dest_tail, val, access);
688 } else {
689 _vtn_local_load_store(b, false, dest_tail, src, access);
690 }
691 }
692
693 nir_ssa_def *
694 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
695 nir_ssa_def **index_out)
696 {
697 assert(vtn_pointer_uses_ssa_offset(b, ptr));
698 if (!ptr->offset) {
699 struct vtn_access_chain chain = {
700 .length = 0,
701 };
702 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
703 }
704 *index_out = ptr->block_index;
705 return ptr->offset;
706 }
707
708 /* Tries to compute the size of an interface block based on the strides and
709 * offsets that are provided to us in the SPIR-V source.
710 */
711 static unsigned
712 vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
713 {
714 enum glsl_base_type base_type = glsl_get_base_type(type->type);
715 switch (base_type) {
716 case GLSL_TYPE_UINT:
717 case GLSL_TYPE_INT:
718 case GLSL_TYPE_UINT16:
719 case GLSL_TYPE_INT16:
720 case GLSL_TYPE_UINT8:
721 case GLSL_TYPE_INT8:
722 case GLSL_TYPE_UINT64:
723 case GLSL_TYPE_INT64:
724 case GLSL_TYPE_FLOAT:
725 case GLSL_TYPE_FLOAT16:
726 case GLSL_TYPE_BOOL:
727 case GLSL_TYPE_DOUBLE: {
728 unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
729 glsl_get_matrix_columns(type->type);
730 if (cols > 1) {
731 vtn_assert(type->stride > 0);
732 return type->stride * cols;
733 } else {
734 unsigned type_size = glsl_get_bit_size(type->type) / 8;
735 return glsl_get_vector_elements(type->type) * type_size;
736 }
737 }
738
739 case GLSL_TYPE_STRUCT:
740 case GLSL_TYPE_INTERFACE: {
741 unsigned size = 0;
742 unsigned num_fields = glsl_get_length(type->type);
743 for (unsigned f = 0; f < num_fields; f++) {
744 unsigned field_end = type->offsets[f] +
745 vtn_type_block_size(b, type->members[f]);
746 size = MAX2(size, field_end);
747 }
748 return size;
749 }
750
751 case GLSL_TYPE_ARRAY:
752 vtn_assert(type->stride > 0);
753 vtn_assert(glsl_get_length(type->type) > 0);
754 return type->stride * glsl_get_length(type->type);
755
756 default:
757 vtn_fail("Invalid block type");
758 return 0;
759 }
760 }
761
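/* Emits the actual load/store intrinsic for one vector or scalar once the
 * block index and byte offset are known. Booleans are transferred as 32-bit
 * values and converted back with a compare on load.
 */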
762 static void
763 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
764 nir_ssa_def *index, nir_ssa_def *offset,
765 unsigned access_offset, unsigned access_size,
766 struct vtn_ssa_value **inout, const struct glsl_type *type,
767 enum gl_access_qualifier access)
768 {
769 nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
770 instr->num_components = glsl_get_vector_elements(type);
771
772 /* Booleans usually shouldn't show up in external memory in SPIR-V.
773 * However, they do for certain older GLSLang versions and can for shared
774 * memory when we lower access chains internally.
775 */
776 const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
777 glsl_get_bit_size(type);
778
779 int src = 0;
780 if (!load) {
781 nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
782 instr->src[src++] = nir_src_for_ssa((*inout)->def);
783 }
784
785 if (op == nir_intrinsic_load_push_constant) {
786 nir_intrinsic_set_base(instr, access_offset);
787 nir_intrinsic_set_range(instr, access_size);
788 }
789
790 if (op == nir_intrinsic_load_ubo ||
791 op == nir_intrinsic_load_ssbo ||
792 op == nir_intrinsic_store_ssbo) {
793 nir_intrinsic_set_access(instr, access);
794 }
795
796 /* With extensions like relaxed_block_layout, we really can't guarantee
797 * much more than scalar alignment.
798 */
799 if (op != nir_intrinsic_load_push_constant)
800 nir_intrinsic_set_align(instr, data_bit_size / 8, 0);
801
802 if (index)
803 instr->src[src++] = nir_src_for_ssa(index);
804
805 if (op == nir_intrinsic_load_push_constant) {
806 /* We need to subtract the offset from where the intrinsic will load the
807 * data. */
808 instr->src[src++] =
809 nir_src_for_ssa(nir_isub(&b->nb, offset,
810 nir_imm_int(&b->nb, access_offset)));
811 } else {
812 instr->src[src++] = nir_src_for_ssa(offset);
813 }
814
815 if (load) {
816 nir_ssa_dest_init(&instr->instr, &instr->dest,
817 instr->num_components, data_bit_size, NULL);
818 (*inout)->def = &instr->dest.ssa;
819 }
820
821 nir_builder_instr_insert(&b->nb, &instr->instr);
822
823 if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
824 (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
825 }
826
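/* Recursively walks a block type, splitting matrices, arrays, and structs
 * into per-column, per-element, and per-member accesses and handing each
 * vector or scalar piece to _vtn_load_store_tail.
 */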
827 static void
828 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
829 nir_ssa_def *index, nir_ssa_def *offset,
830 unsigned access_offset, unsigned access_size,
831 struct vtn_type *type, enum gl_access_qualifier access,
832 struct vtn_ssa_value **inout)
833 {
834 if (load && *inout == NULL)
835 *inout = vtn_create_ssa_value(b, type->type);
836
837 enum glsl_base_type base_type = glsl_get_base_type(type->type);
838 switch (base_type) {
839 case GLSL_TYPE_UINT:
840 case GLSL_TYPE_INT:
841 case GLSL_TYPE_UINT16:
842 case GLSL_TYPE_INT16:
843 case GLSL_TYPE_UINT8:
844 case GLSL_TYPE_INT8:
845 case GLSL_TYPE_UINT64:
846 case GLSL_TYPE_INT64:
847 case GLSL_TYPE_FLOAT:
848 case GLSL_TYPE_FLOAT16:
849 case GLSL_TYPE_DOUBLE:
850 case GLSL_TYPE_BOOL:
851 /* This is where things get interesting. At this point, we've hit
852 * a vector, a scalar, or a matrix.
853 */
854 if (glsl_type_is_matrix(type->type)) {
855 /* Loading the whole matrix */
856 struct vtn_ssa_value *transpose;
857 unsigned num_ops, vec_width, col_stride;
858 if (type->row_major) {
859 num_ops = glsl_get_vector_elements(type->type);
860 vec_width = glsl_get_matrix_columns(type->type);
861 col_stride = type->array_element->stride;
862 if (load) {
863 const struct glsl_type *transpose_type =
864 glsl_matrix_type(base_type, vec_width, num_ops);
865 *inout = vtn_create_ssa_value(b, transpose_type);
866 } else {
867 transpose = vtn_ssa_transpose(b, *inout);
868 inout = &transpose;
869 }
870 } else {
871 num_ops = glsl_get_matrix_columns(type->type);
872 vec_width = glsl_get_vector_elements(type->type);
873 col_stride = type->stride;
874 }
875
876 for (unsigned i = 0; i < num_ops; i++) {
877 nir_ssa_def *elem_offset =
878 nir_iadd_imm(&b->nb, offset, i * col_stride);
879 _vtn_load_store_tail(b, op, load, index, elem_offset,
880 access_offset, access_size,
881 &(*inout)->elems[i],
882 glsl_vector_type(base_type, vec_width),
883 type->access | access);
884 }
885
886 if (load && type->row_major)
887 *inout = vtn_ssa_transpose(b, *inout);
888 } else {
889 unsigned elems = glsl_get_vector_elements(type->type);
890 unsigned type_size = glsl_get_bit_size(type->type) / 8;
891 if (elems == 1 || type->stride == type_size) {
892 /* This is a tightly-packed normal scalar or vector load */
893 vtn_assert(glsl_type_is_vector_or_scalar(type->type));
894 _vtn_load_store_tail(b, op, load, index, offset,
895 access_offset, access_size,
896 inout, type->type,
897 type->access | access);
898 } else {
899 /* This is a strided load. We have to load N things separately.
900 * This is the single column of a row-major matrix case.
901 */
902 vtn_assert(type->stride > type_size);
903 vtn_assert(type->stride % type_size == 0);
904
905 nir_ssa_def *per_comp[4];
906 for (unsigned i = 0; i < elems; i++) {
907 nir_ssa_def *elem_offset =
908 nir_iadd_imm(&b->nb, offset, i * type->stride);
909 struct vtn_ssa_value *comp, temp_val;
910 if (!load) {
911 temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
912 temp_val.type = glsl_scalar_type(base_type);
913 }
914 comp = &temp_val;
915 _vtn_load_store_tail(b, op, load, index, elem_offset,
916 access_offset, access_size,
917 &comp, glsl_scalar_type(base_type),
918 type->access | access);
919 per_comp[i] = comp->def;
920 }
921
922 if (load) {
923 if (*inout == NULL)
924 *inout = vtn_create_ssa_value(b, type->type);
925 (*inout)->def = nir_vec(&b->nb, per_comp, elems);
926 }
927 }
928 }
929 return;
930
931 case GLSL_TYPE_ARRAY: {
932 unsigned elems = glsl_get_length(type->type);
933 for (unsigned i = 0; i < elems; i++) {
934 nir_ssa_def *elem_off =
935 nir_iadd_imm(&b->nb, offset, i * type->stride);
936 _vtn_block_load_store(b, op, load, index, elem_off,
937 access_offset, access_size,
938 type->array_element,
939 type->array_element->access | access,
940 &(*inout)->elems[i]);
941 }
942 return;
943 }
944
945 case GLSL_TYPE_INTERFACE:
946 case GLSL_TYPE_STRUCT: {
947 unsigned elems = glsl_get_length(type->type);
948 for (unsigned i = 0; i < elems; i++) {
949 nir_ssa_def *elem_off =
950 nir_iadd_imm(&b->nb, offset, type->offsets[i]);
951 _vtn_block_load_store(b, op, load, index, elem_off,
952 access_offset, access_size,
953 type->members[i],
954 type->members[i]->access | access,
955 &(*inout)->elems[i]);
956 }
957 return;
958 }
959
960 default:
961 vtn_fail("Invalid block member type");
962 }
963 }
964
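/* Loads a complete value through a block pointer (UBO, SSBO, push constant,
 * or offset-lowered workgroup memory) using the index/offset path.
 */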
965 static struct vtn_ssa_value *
966 vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
967 {
968 nir_intrinsic_op op;
969 unsigned access_offset = 0, access_size = 0;
970 switch (src->mode) {
971 case vtn_variable_mode_ubo:
972 op = nir_intrinsic_load_ubo;
973 break;
974 case vtn_variable_mode_ssbo:
975 op = nir_intrinsic_load_ssbo;
976 break;
977 case vtn_variable_mode_push_constant:
978 op = nir_intrinsic_load_push_constant;
979 access_size = b->shader->num_uniforms;
980 break;
981 case vtn_variable_mode_workgroup:
982 op = nir_intrinsic_load_shared;
983 break;
984 default:
985 vtn_fail("Invalid block variable mode");
986 }
987
988 nir_ssa_def *offset, *index = NULL;
989 offset = vtn_pointer_to_offset(b, src, &index);
990
991 struct vtn_ssa_value *value = NULL;
992 _vtn_block_load_store(b, op, true, index, offset,
993 access_offset, access_size,
994 src->type, src->access, &value);
995 return value;
996 }
997
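/* Stores a value through a block pointer; only SSBOs and offset-lowered
 * workgroup memory are writable here.
 */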
998 static void
999 vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
1000 struct vtn_pointer *dst)
1001 {
1002 nir_intrinsic_op op;
1003 switch (dst->mode) {
1004 case vtn_variable_mode_ssbo:
1005 op = nir_intrinsic_store_ssbo;
1006 break;
1007 case vtn_variable_mode_workgroup:
1008 op = nir_intrinsic_store_shared;
1009 break;
1010 default:
1011 vtn_fail("Invalid block variable mode");
1012 }
1013
1014 nir_ssa_def *offset, *index = NULL;
1015 offset = vtn_pointer_to_offset(b, dst, &index);
1016
1017 _vtn_block_load_store(b, op, false, index, offset,
1018 0, 0, dst->type, dst->access, &src);
1019 }
1020
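/* Recursively loads or stores through a deref-based pointer, descending into
 * arrays and structs and emitting per-vector accesses at the leaves.
 */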
1021 static void
1022 _vtn_variable_load_store(struct vtn_builder *b, bool load,
1023 struct vtn_pointer *ptr,
1024 enum gl_access_qualifier access,
1025 struct vtn_ssa_value **inout)
1026 {
1027 enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
1028 switch (base_type) {
1029 case GLSL_TYPE_UINT:
1030 case GLSL_TYPE_INT:
1031 case GLSL_TYPE_UINT16:
1032 case GLSL_TYPE_INT16:
1033 case GLSL_TYPE_UINT8:
1034 case GLSL_TYPE_INT8:
1035 case GLSL_TYPE_UINT64:
1036 case GLSL_TYPE_INT64:
1037 case GLSL_TYPE_FLOAT:
1038 case GLSL_TYPE_FLOAT16:
1039 case GLSL_TYPE_BOOL:
1040 case GLSL_TYPE_DOUBLE:
1041 if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
1042 /* We hit a vector or scalar; go ahead and emit the load[s] */
1043 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
1044 if (vtn_pointer_is_external_block(b, ptr)) {
1045 /* If it's external, we call nir_load/store_deref directly. The
1046 * vtn_local_load/store helpers are too clever and do magic to
1047 * avoid array derefs of vectors. That magic is both less
1048 * efficient than the direct load/store and, in the case of
1049 * stores, broken because it creates a race condition if two
1050 * threads are writing to different components of the same vector
1051 * due to the load+insert+store it uses to emulate the array
1052 * deref.
1053 */
1054 if (load) {
1055 *inout = vtn_create_ssa_value(b, ptr->type->type);
1056 (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
1057 ptr->type->access | access);
1058 } else {
1059 nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
1060 ptr->type->access | access);
1061 }
1062 } else {
1063 if (load) {
1064 *inout = vtn_local_load(b, deref, ptr->type->access | access);
1065 } else {
1066 vtn_local_store(b, *inout, deref, ptr->type->access | access);
1067 }
1068 }
1069 return;
1070 }
1071 /* Fall through */
1072
1073 case GLSL_TYPE_INTERFACE:
1074 case GLSL_TYPE_ARRAY:
1075 case GLSL_TYPE_STRUCT: {
1076 unsigned elems = glsl_get_length(ptr->type->type);
1077 if (load) {
1078 vtn_assert(*inout == NULL);
1079 *inout = rzalloc(b, struct vtn_ssa_value);
1080 (*inout)->type = ptr->type->type;
1081 (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
1082 }
1083
1084 struct vtn_access_chain chain = {
1085 .length = 1,
1086 .link = {
1087 { .mode = vtn_access_mode_literal, },
1088 }
1089 };
1090 for (unsigned i = 0; i < elems; i++) {
1091 chain.link[0].id = i;
1092 struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
1093 _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
1094 &(*inout)->elems[i]);
1095 }
1096 return;
1097 }
1098
1099 default:
1100 vtn_fail("Invalid access chain type");
1101 }
1102 }
1103
1104 struct vtn_ssa_value *
1105 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
1106 {
1107 if (vtn_pointer_uses_ssa_offset(b, src)) {
1108 return vtn_block_load(b, src);
1109 } else {
1110 struct vtn_ssa_value *val = NULL;
1111 _vtn_variable_load_store(b, true, src, src->access, &val);
1112 return val;
1113 }
1114 }
1115
1116 void
1117 vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
1118 struct vtn_pointer *dest)
1119 {
1120 if (vtn_pointer_uses_ssa_offset(b, dest)) {
1121 vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
1122 dest->mode == vtn_variable_mode_workgroup);
1123 vtn_block_store(b, src, dest);
1124 } else {
1125 _vtn_variable_load_store(b, false, dest, dest->access, &src);
1126 }
1127 }
1128
1129 static void
1130 _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1131 struct vtn_pointer *src)
1132 {
1133 vtn_assert(src->type->type == dest->type->type);
1134 enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
1135 switch (base_type) {
1136 case GLSL_TYPE_UINT:
1137 case GLSL_TYPE_INT:
1138 case GLSL_TYPE_UINT16:
1139 case GLSL_TYPE_INT16:
1140 case GLSL_TYPE_UINT8:
1141 case GLSL_TYPE_INT8:
1142 case GLSL_TYPE_UINT64:
1143 case GLSL_TYPE_INT64:
1144 case GLSL_TYPE_FLOAT:
1145 case GLSL_TYPE_FLOAT16:
1146 case GLSL_TYPE_DOUBLE:
1147 case GLSL_TYPE_BOOL:
1148 /* At this point, we have a scalar, vector, or matrix so we know that
1149 * there cannot be any structure splitting still in the way. By
1150 * stopping at the matrix level rather than the vector level, we
1151 * ensure that matrices get loaded in the optimal way even if they
1152 * are stored row-major in a UBO.
1153 */
1154 vtn_variable_store(b, vtn_variable_load(b, src), dest);
1155 return;
1156
1157 case GLSL_TYPE_INTERFACE:
1158 case GLSL_TYPE_ARRAY:
1159 case GLSL_TYPE_STRUCT: {
1160 struct vtn_access_chain chain = {
1161 .length = 1,
1162 .link = {
1163 { .mode = vtn_access_mode_literal, },
1164 }
1165 };
1166 unsigned elems = glsl_get_length(src->type->type);
1167 for (unsigned i = 0; i < elems; i++) {
1168 chain.link[0].id = i;
1169 struct vtn_pointer *src_elem =
1170 vtn_pointer_dereference(b, src, &chain);
1171 struct vtn_pointer *dest_elem =
1172 vtn_pointer_dereference(b, dest, &chain);
1173
1174 _vtn_variable_copy(b, dest_elem, src_elem);
1175 }
1176 return;
1177 }
1178
1179 default:
1180 vtn_fail("Invalid access chain type");
1181 }
1182 }
1183
1184 static void
1185 vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1186 struct vtn_pointer *src)
1187 {
1188 /* TODO: At some point, we should add a special-case for when we can
1189 * just emit a copy_var intrinsic.
1190 */
1191 _vtn_variable_copy(b, dest, src);
1192 }
1193
1194 static void
1195 set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
1196 {
1197 vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
1198 *mode = nir_var_system_value;
1199 }
1200
1201 static void
1202 vtn_get_builtin_location(struct vtn_builder *b,
1203 SpvBuiltIn builtin, int *location,
1204 nir_variable_mode *mode)
1205 {
1206 switch (builtin) {
1207 case SpvBuiltInPosition:
1208 *location = VARYING_SLOT_POS;
1209 break;
1210 case SpvBuiltInPointSize:
1211 *location = VARYING_SLOT_PSIZ;
1212 break;
1213 case SpvBuiltInClipDistance:
1214 *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
1215 break;
1216 case SpvBuiltInCullDistance:
1217 *location = VARYING_SLOT_CULL_DIST0;
1218 break;
1219 case SpvBuiltInVertexId:
1220 case SpvBuiltInVertexIndex:
1221 /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
1222 * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
1223 * same as gl_VertexID, which is non-zero-based, and removes
1224 * VertexIndex. Since they're both defined to be non-zero-based, we use
1225 * SYSTEM_VALUE_VERTEX_ID for both.
1226 */
1227 *location = SYSTEM_VALUE_VERTEX_ID;
1228 set_mode_system_value(b, mode);
1229 break;
1230 case SpvBuiltInInstanceIndex:
1231 *location = SYSTEM_VALUE_INSTANCE_INDEX;
1232 set_mode_system_value(b, mode);
1233 break;
1234 case SpvBuiltInInstanceId:
1235 *location = SYSTEM_VALUE_INSTANCE_ID;
1236 set_mode_system_value(b, mode);
1237 break;
1238 case SpvBuiltInPrimitiveId:
1239 if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
1240 vtn_assert(*mode == nir_var_shader_in);
1241 *location = VARYING_SLOT_PRIMITIVE_ID;
1242 } else if (*mode == nir_var_shader_out) {
1243 *location = VARYING_SLOT_PRIMITIVE_ID;
1244 } else {
1245 *location = SYSTEM_VALUE_PRIMITIVE_ID;
1246 set_mode_system_value(b, mode);
1247 }
1248 break;
1249 case SpvBuiltInInvocationId:
1250 *location = SYSTEM_VALUE_INVOCATION_ID;
1251 set_mode_system_value(b, mode);
1252 break;
1253 case SpvBuiltInLayer:
1254 *location = VARYING_SLOT_LAYER;
1255 if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1256 *mode = nir_var_shader_in;
1257 else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1258 *mode = nir_var_shader_out;
1259 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1260 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1261 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1262 *mode = nir_var_shader_out;
1263 else
1264 vtn_fail("invalid stage for SpvBuiltInLayer");
1265 break;
1266 case SpvBuiltInViewportIndex:
1267 *location = VARYING_SLOT_VIEWPORT;
1268 if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1269 *mode = nir_var_shader_out;
1270 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1271 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1272 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1273 *mode = nir_var_shader_out;
1274 else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1275 *mode = nir_var_shader_in;
1276 else
1277 vtn_fail("invalid stage for SpvBuiltInViewportIndex");
1278 break;
1279 case SpvBuiltInTessLevelOuter:
1280 *location = VARYING_SLOT_TESS_LEVEL_OUTER;
1281 break;
1282 case SpvBuiltInTessLevelInner:
1283 *location = VARYING_SLOT_TESS_LEVEL_INNER;
1284 break;
1285 case SpvBuiltInTessCoord:
1286 *location = SYSTEM_VALUE_TESS_COORD;
1287 set_mode_system_value(b, mode);
1288 break;
1289 case SpvBuiltInPatchVertices:
1290 *location = SYSTEM_VALUE_VERTICES_IN;
1291 set_mode_system_value(b, mode);
1292 break;
1293 case SpvBuiltInFragCoord:
1294 *location = VARYING_SLOT_POS;
1295 vtn_assert(*mode == nir_var_shader_in);
1296 break;
1297 case SpvBuiltInPointCoord:
1298 *location = VARYING_SLOT_PNTC;
1299 vtn_assert(*mode == nir_var_shader_in);
1300 break;
1301 case SpvBuiltInFrontFacing:
1302 *location = SYSTEM_VALUE_FRONT_FACE;
1303 set_mode_system_value(b, mode);
1304 break;
1305 case SpvBuiltInSampleId:
1306 *location = SYSTEM_VALUE_SAMPLE_ID;
1307 set_mode_system_value(b, mode);
1308 break;
1309 case SpvBuiltInSamplePosition:
1310 *location = SYSTEM_VALUE_SAMPLE_POS;
1311 set_mode_system_value(b, mode);
1312 break;
1313 case SpvBuiltInSampleMask:
1314 if (*mode == nir_var_shader_out) {
1315 *location = FRAG_RESULT_SAMPLE_MASK;
1316 } else {
1317 *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
1318 set_mode_system_value(b, mode);
1319 }
1320 break;
1321 case SpvBuiltInFragDepth:
1322 *location = FRAG_RESULT_DEPTH;
1323 vtn_assert(*mode == nir_var_shader_out);
1324 break;
1325 case SpvBuiltInHelperInvocation:
1326 *location = SYSTEM_VALUE_HELPER_INVOCATION;
1327 set_mode_system_value(b, mode);
1328 break;
1329 case SpvBuiltInNumWorkgroups:
1330 *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
1331 set_mode_system_value(b, mode);
1332 break;
1333 case SpvBuiltInWorkgroupSize:
1334 *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
1335 set_mode_system_value(b, mode);
1336 break;
1337 case SpvBuiltInWorkgroupId:
1338 *location = SYSTEM_VALUE_WORK_GROUP_ID;
1339 set_mode_system_value(b, mode);
1340 break;
1341 case SpvBuiltInLocalInvocationId:
1342 *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
1343 set_mode_system_value(b, mode);
1344 break;
1345 case SpvBuiltInLocalInvocationIndex:
1346 *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
1347 set_mode_system_value(b, mode);
1348 break;
1349 case SpvBuiltInGlobalInvocationId:
1350 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
1351 set_mode_system_value(b, mode);
1352 break;
1353 case SpvBuiltInGlobalLinearId:
1354 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
1355 set_mode_system_value(b, mode);
1356 break;
1357 case SpvBuiltInBaseVertex:
1358 /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
1359 * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
1360 */
1361 *location = SYSTEM_VALUE_FIRST_VERTEX;
1362 set_mode_system_value(b, mode);
1363 break;
1364 case SpvBuiltInBaseInstance:
1365 *location = SYSTEM_VALUE_BASE_INSTANCE;
1366 set_mode_system_value(b, mode);
1367 break;
1368 case SpvBuiltInDrawIndex:
1369 *location = SYSTEM_VALUE_DRAW_ID;
1370 set_mode_system_value(b, mode);
1371 break;
1372 case SpvBuiltInSubgroupSize:
1373 *location = SYSTEM_VALUE_SUBGROUP_SIZE;
1374 set_mode_system_value(b, mode);
1375 break;
1376 case SpvBuiltInSubgroupId:
1377 *location = SYSTEM_VALUE_SUBGROUP_ID;
1378 set_mode_system_value(b, mode);
1379 break;
1380 case SpvBuiltInSubgroupLocalInvocationId:
1381 *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
1382 set_mode_system_value(b, mode);
1383 break;
1384 case SpvBuiltInNumSubgroups:
1385 *location = SYSTEM_VALUE_NUM_SUBGROUPS;
1386 set_mode_system_value(b, mode);
1387 break;
1388 case SpvBuiltInDeviceIndex:
1389 *location = SYSTEM_VALUE_DEVICE_INDEX;
1390 set_mode_system_value(b, mode);
1391 break;
1392 case SpvBuiltInViewIndex:
1393 *location = SYSTEM_VALUE_VIEW_INDEX;
1394 set_mode_system_value(b, mode);
1395 break;
1396 case SpvBuiltInSubgroupEqMask:
1397 *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
1398 set_mode_system_value(b, mode);
1399 break;
1400 case SpvBuiltInSubgroupGeMask:
1401 *location = SYSTEM_VALUE_SUBGROUP_GE_MASK,
1402 set_mode_system_value(b, mode);
1403 break;
1404 case SpvBuiltInSubgroupGtMask:
1405 *location = SYSTEM_VALUE_SUBGROUP_GT_MASK,
1406 set_mode_system_value(b, mode);
1407 break;
1408 case SpvBuiltInSubgroupLeMask:
1409 *location = SYSTEM_VALUE_SUBGROUP_LE_MASK,
1410 set_mode_system_value(b, mode);
1411 break;
1412 case SpvBuiltInSubgroupLtMask:
1413 *location = SYSTEM_VALUE_SUBGROUP_LT_MASK,
1414 set_mode_system_value(b, mode);
1415 break;
1416 case SpvBuiltInFragStencilRefEXT:
1417 *location = FRAG_RESULT_STENCIL;
1418 vtn_assert(*mode == nir_var_shader_out);
1419 break;
1420 case SpvBuiltInWorkDim:
1421 *location = SYSTEM_VALUE_WORK_DIM;
1422 set_mode_system_value(b, mode);
1423 break;
1424 case SpvBuiltInGlobalSize:
1425 *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
1426 set_mode_system_value(b, mode);
1427 break;
1428 default:
1429 vtn_fail("Unsupported builtin: %s (%u)",
1430 spirv_builtin_to_string(builtin), builtin);
1431 }
1432 }
1433
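/* Applies a single SPIR-V decoration to the nir_variable_data. Decorations
 * that apply to the vtn_variable as a whole are handled in var_decoration_cb
 * instead.
 */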
1434 static void
1435 apply_var_decoration(struct vtn_builder *b,
1436 struct nir_variable_data *var_data,
1437 const struct vtn_decoration *dec)
1438 {
1439 switch (dec->decoration) {
1440 case SpvDecorationRelaxedPrecision:
1441 break; /* FIXME: Do nothing with this for now. */
1442 case SpvDecorationNoPerspective:
1443 var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
1444 break;
1445 case SpvDecorationFlat:
1446 var_data->interpolation = INTERP_MODE_FLAT;
1447 break;
1448 case SpvDecorationCentroid:
1449 var_data->centroid = true;
1450 break;
1451 case SpvDecorationSample:
1452 var_data->sample = true;
1453 break;
1454 case SpvDecorationInvariant:
1455 var_data->invariant = true;
1456 break;
1457 case SpvDecorationConstant:
1458 var_data->read_only = true;
1459 break;
1460 case SpvDecorationNonReadable:
1461 var_data->image.access |= ACCESS_NON_READABLE;
1462 break;
1463 case SpvDecorationNonWritable:
1464 var_data->read_only = true;
1465 var_data->image.access |= ACCESS_NON_WRITEABLE;
1466 break;
1467 case SpvDecorationRestrict:
1468 var_data->image.access |= ACCESS_RESTRICT;
1469 break;
1470 case SpvDecorationVolatile:
1471 var_data->image.access |= ACCESS_VOLATILE;
1472 break;
1473 case SpvDecorationCoherent:
1474 var_data->image.access |= ACCESS_COHERENT;
1475 break;
1476 case SpvDecorationComponent:
1477 var_data->location_frac = dec->operands[0];
1478 break;
1479 case SpvDecorationIndex:
1480 var_data->index = dec->operands[0];
1481 break;
1482 case SpvDecorationBuiltIn: {
1483 SpvBuiltIn builtin = dec->operands[0];
1484
1485 nir_variable_mode mode = var_data->mode;
1486 vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
1487 var_data->mode = mode;
1488
1489 switch (builtin) {
1490 case SpvBuiltInTessLevelOuter:
1491 case SpvBuiltInTessLevelInner:
1492 case SpvBuiltInClipDistance:
1493 case SpvBuiltInCullDistance:
1494 var_data->compact = true;
1495 break;
1496 default:
1497 break;
1498 }
1499 }
1500
1501 case SpvDecorationSpecId:
1502 case SpvDecorationRowMajor:
1503 case SpvDecorationColMajor:
1504 case SpvDecorationMatrixStride:
1505 case SpvDecorationAliased:
1506 case SpvDecorationUniform:
1507 case SpvDecorationLinkageAttributes:
1508 break; /* Do nothing with these here */
1509
1510 case SpvDecorationPatch:
1511 var_data->patch = true;
1512 break;
1513
1514 case SpvDecorationLocation:
1515 vtn_fail("Handled above");
1516
1517 case SpvDecorationBlock:
1518 case SpvDecorationBufferBlock:
1519 case SpvDecorationArrayStride:
1520 case SpvDecorationGLSLShared:
1521 case SpvDecorationGLSLPacked:
1522 break; /* These can apply to a type but we don't care about them */
1523
1524 case SpvDecorationBinding:
1525 case SpvDecorationDescriptorSet:
1526 case SpvDecorationNoContraction:
1527 case SpvDecorationInputAttachmentIndex:
1528 vtn_warn("Decoration not allowed for variable or structure member: %s",
1529 spirv_decoration_to_string(dec->decoration));
1530 break;
1531
1532 case SpvDecorationXfbBuffer:
1533 var_data->explicit_xfb_buffer = true;
1534 var_data->xfb_buffer = dec->operands[0];
1535 var_data->always_active_io = true;
1536 break;
1537 case SpvDecorationXfbStride:
1538 var_data->explicit_xfb_stride = true;
1539 var_data->xfb_stride = dec->operands[0];
1540 break;
1541 case SpvDecorationOffset:
1542 var_data->explicit_offset = true;
1543 var_data->offset = dec->operands[0];
1544 break;
1545
1546 case SpvDecorationStream:
1547 var_data->stream = dec->operands[0];
1548 break;
1549
1550 case SpvDecorationCPacked:
1551 case SpvDecorationSaturatedConversion:
1552 case SpvDecorationFuncParamAttr:
1553 case SpvDecorationFPRoundingMode:
1554 case SpvDecorationFPFastMathMode:
1555 case SpvDecorationAlignment:
1556 if (b->shader->info.stage != MESA_SHADER_KERNEL) {
1557 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1558 spirv_decoration_to_string(dec->decoration));
1559 }
1560 break;
1561
1562 case SpvDecorationHlslSemanticGOOGLE:
1563 /* HLSL semantic decorations can safely be ignored by the driver. */
1564 break;
1565
1566 case SpvDecorationRestrictPointerEXT:
1567 case SpvDecorationAliasedPointerEXT:
1568 /* TODO: We should actually plumb alias information through NIR. */
1569 break;
1570
1571 default:
1572 vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
1573 }
1574 }
1575
1576 static void
1577 var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1578 const struct vtn_decoration *dec, void *out_is_patch)
1579 {
1580 if (dec->decoration == SpvDecorationPatch) {
1581 *((bool *) out_is_patch) = true;
1582 }
1583 }
1584
1585 static void
1586 var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1587 const struct vtn_decoration *dec, void *void_var)
1588 {
1589 struct vtn_variable *vtn_var = void_var;
1590
1591 /* Handle decorations that apply to a vtn_variable as a whole */
1592 switch (dec->decoration) {
1593 case SpvDecorationBinding:
1594 vtn_var->binding = dec->operands[0];
1595 vtn_var->explicit_binding = true;
1596 return;
1597 case SpvDecorationDescriptorSet:
1598 vtn_var->descriptor_set = dec->operands[0];
1599 return;
1600 case SpvDecorationInputAttachmentIndex:
1601 vtn_var->input_attachment_index = dec->operands[0];
1602 return;
1603 case SpvDecorationPatch:
1604 vtn_var->patch = true;
1605 break;
1606 case SpvDecorationOffset:
1607 vtn_var->offset = dec->operands[0];
1608 break;
1609 case SpvDecorationNonWritable:
1610 vtn_var->access |= ACCESS_NON_WRITEABLE;
1611 break;
1612 case SpvDecorationNonReadable:
1613 vtn_var->access |= ACCESS_NON_READABLE;
1614 break;
1615 case SpvDecorationVolatile:
1616 vtn_var->access |= ACCESS_VOLATILE;
1617 break;
1618 case SpvDecorationCoherent:
1619 vtn_var->access |= ACCESS_COHERENT;
1620 break;
1621 case SpvDecorationHlslCounterBufferGOOGLE:
1622 /* HLSL semantic decorations can safely be ignored by the driver. */
1623 break;
1624 default:
1625 break;
1626 }
1627
1628 if (val->value_type == vtn_value_type_pointer) {
1629 assert(val->pointer->var == void_var);
1630 assert(member == -1);
1631 } else {
1632 assert(val->value_type == vtn_value_type_type);
1633 }
1634
1635 /* Location is odd. If applied to a split structure, we have to walk the
1636 * whole thing and accumulate the location. It's easier to handle as a
1637 * special case.
1638 */
1639 if (dec->decoration == SpvDecorationLocation) {
1640 unsigned location = dec->operands[0];
1641 if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
1642 vtn_var->mode == vtn_variable_mode_output) {
1643 location += FRAG_RESULT_DATA0;
1644 } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
1645 vtn_var->mode == vtn_variable_mode_input) {
1646 location += VERT_ATTRIB_GENERIC0;
1647 } else if (vtn_var->mode == vtn_variable_mode_input ||
1648 vtn_var->mode == vtn_variable_mode_output) {
1649 location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
1650 } else if (vtn_var->mode != vtn_variable_mode_uniform) {
1651 vtn_warn("Location must be on input, output, uniform, sampler or "
1652 "image variable");
1653 return;
1654 }
1655
1656 if (vtn_var->var->num_members == 0) {
1657 /* This handles the member and lone variable cases */
1658 vtn_var->var->data.location = location;
1659 } else {
1660 /* This handles the structure member case */
1661 assert(vtn_var->var->members);
1662
1663 if (member == -1)
1664 vtn_var->base_location = location;
1665 else
1666 vtn_var->var->members[member].location = location;
1667 }
1668
1669 return;
1670 } else {
1671 if (vtn_var->var) {
1672 if (vtn_var->var->num_members == 0) {
1673 /* We call this function on types as well as variables and not all
1674 * struct types get split so we can end up having stray member
1675 * decorations; just ignore them.
1676 */
1677 if (member == -1)
1678 apply_var_decoration(b, &vtn_var->var->data, dec);
1679 } else if (member >= 0) {
1680 /* Member decorations must come from a type */
1681 assert(val->value_type == vtn_value_type_type);
1682 apply_var_decoration(b, &vtn_var->var->members[member], dec);
1683 } else {
1684 unsigned length =
1685 glsl_get_length(glsl_without_array(vtn_var->type->type));
1686 for (unsigned i = 0; i < length; i++)
1687 apply_var_decoration(b, &vtn_var->var->members[i], dec);
1688 }
1689 } else {
1690 /* A few variables, those with external storage, have no actual
1691 * nir_variables associated with them. Fortunately, all decorations
1692 * we care about for those variables are on the type only.
1693 */
1694 vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
1695 vtn_var->mode == vtn_variable_mode_ssbo ||
1696 vtn_var->mode == vtn_variable_mode_push_constant ||
1697 (vtn_var->mode == vtn_variable_mode_workgroup &&
1698 b->options->lower_workgroup_access_to_offsets));
1699 }
1700 }
1701 }
1702
1703 static void
1704 ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1705 const struct vtn_decoration *dec, void *void_ptr)
1706 {
1707 struct vtn_pointer *ptr = void_ptr;
1708
1709 switch (dec->decoration) {
1710 case SpvDecorationNonUniformEXT:
1711 ptr->access |= ACCESS_NON_UNIFORM;
1712 break;
1713
1714 default:
1715 break;
1716 }
1717 }
1718
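/* Maps a SPIR-V storage class to the corresponding vtn_variable_mode and,
 * when nir_mode_out is non-NULL, the matching nir_variable_mode.  Most
 * classes map directly; SpvStorageClassUniform depends on whether the
 * interface type is decorated Block (UBO) or BufferBlock (SSBO), falling
 * back to a default-block uniform otherwise.  A caller might use it roughly
 * like:
 *
 *    nir_variable_mode nir_mode;
 *    enum vtn_variable_mode mode =
 *       vtn_storage_class_to_mode(b, storage_class, interface_type, &nir_mode);
 */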
1719 enum vtn_variable_mode
1720 vtn_storage_class_to_mode(struct vtn_builder *b,
1721 SpvStorageClass class,
1722 struct vtn_type *interface_type,
1723 nir_variable_mode *nir_mode_out)
1724 {
1725 enum vtn_variable_mode mode;
1726 nir_variable_mode nir_mode;
1727 switch (class) {
1728 case SpvStorageClassUniform:
1729 if (interface_type->block) {
1730 mode = vtn_variable_mode_ubo;
1731 nir_mode = nir_var_mem_ubo;
1732 } else if (interface_type->buffer_block) {
1733 mode = vtn_variable_mode_ssbo;
1734 nir_mode = nir_var_mem_ssbo;
1735 } else {
1736 /* Default-block uniforms, coming from gl_spirv */
1737 mode = vtn_variable_mode_uniform;
1738 nir_mode = nir_var_uniform;
1739 }
1740 break;
1741 case SpvStorageClassStorageBuffer:
1742 mode = vtn_variable_mode_ssbo;
1743 nir_mode = nir_var_mem_ssbo;
1744 break;
1745 case SpvStorageClassPhysicalStorageBufferEXT:
1746 mode = vtn_variable_mode_phys_ssbo;
1747 nir_mode = nir_var_mem_global;
1748 break;
1749 case SpvStorageClassUniformConstant:
1750 mode = vtn_variable_mode_uniform;
1751 nir_mode = nir_var_uniform;
1752 break;
1753 case SpvStorageClassPushConstant:
1754 mode = vtn_variable_mode_push_constant;
1755 nir_mode = nir_var_uniform;
1756 break;
1757 case SpvStorageClassInput:
1758 mode = vtn_variable_mode_input;
1759 nir_mode = nir_var_shader_in;
1760 break;
1761 case SpvStorageClassOutput:
1762 mode = vtn_variable_mode_output;
1763 nir_mode = nir_var_shader_out;
1764 break;
1765 case SpvStorageClassPrivate:
1766 mode = vtn_variable_mode_private;
1767 nir_mode = nir_var_shader_temp;
1768 break;
1769 case SpvStorageClassFunction:
1770 mode = vtn_variable_mode_function;
1771 nir_mode = nir_var_function_temp;
1772 break;
1773 case SpvStorageClassWorkgroup:
1774 mode = vtn_variable_mode_workgroup;
1775 nir_mode = nir_var_mem_shared;
1776 break;
1777 case SpvStorageClassAtomicCounter:
1778 mode = vtn_variable_mode_uniform;
1779 nir_mode = nir_var_uniform;
1780 break;
1781 case SpvStorageClassCrossWorkgroup:
1782 mode = vtn_variable_mode_cross_workgroup;
1783 nir_mode = nir_var_mem_global;
1784 break;
1785 case SpvStorageClassGeneric:
1786 default:
1787 vtn_fail("Unhandled variable storage class: %s (%u)",
1788 spirv_storageclass_to_string(class), class);
1789 }
1790
1791 if (nir_mode_out)
1792 *nir_mode_out = nir_mode;
1793
1794 return mode;
1795 }
1796
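/* Lowers a vtn_pointer to a NIR SSA value.  For offset-based UBO/SSBO
 * pointers this is a vec2 of (block_index, offset); for offset-based
 * workgroup pointers it is the offset alone; for other external blocks
 * that contain a Block type it is just the block index; everything else
 * becomes the SSA value of the corresponding NIR deref.
 */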
1797 nir_ssa_def *
1798 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
1799 {
1800 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1801 /* This pointer needs to have a pointer type with actual storage */
1802 vtn_assert(ptr->ptr_type);
1803 vtn_assert(ptr->ptr_type->type);
1804
1805 if (!ptr->offset) {
1806 /* If we don't have an offset then we must be a pointer to the variable
1807 * itself.
1808 */
1809 vtn_assert(!ptr->offset && !ptr->block_index);
1810
1811 struct vtn_access_chain chain = {
1812 .length = 0,
1813 };
1814 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
1815 }
1816
1817 vtn_assert(ptr->offset);
1818 if (ptr->block_index) {
1819 vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
1820 ptr->mode == vtn_variable_mode_ssbo);
1821 return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
1822 } else {
1823 vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
1824 return ptr->offset;
1825 }
1826 } else {
1827 if (vtn_pointer_is_external_block(b, ptr) &&
1828 vtn_type_contains_block(b, ptr->type) &&
1829 ptr->mode != vtn_variable_mode_phys_ssbo) {
1830 /* In this case, we're looking for a block index and not an actual
1831 * deref.
1832 *
1833 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1834 * at all because we get the pointer directly from the client. This
1835 * assumes that there will never be an SSBO binding variable using the
1836 * PhysicalStorageBufferEXT storage class. This assumption appears
1837 * to be correct according to the Vulkan spec because, per the table
1838 * "Shader Resource and Storage Class Correspondence," only the
1839 * Uniform storage class with BufferBlock or the StorageBuffer
1840 * storage class with Block can be used for SSBO bindings.
1841 */
1842 if (!ptr->block_index) {
1843 /* If we don't have a block_index then we must be a pointer to the
1844 * variable itself.
1845 */
1846 vtn_assert(!ptr->deref);
1847
1848 struct vtn_access_chain chain = {
1849 .length = 0,
1850 };
1851 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
1852 }
1853
1854 return ptr->block_index;
1855 } else {
1856 return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
1857 }
1858 }
1859 }
1860
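/* The inverse of vtn_pointer_to_ssa: rebuilds a vtn_pointer from its SSA
 * representation, using the pointer type's storage class to pick the mode
 * and then splitting the value back into (block_index, offset), a lone
 * offset, a block index, or a deref cast as appropriate.
 */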
1861 struct vtn_pointer *
1862 vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
1863 struct vtn_type *ptr_type)
1864 {
1865 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1866
1867 struct vtn_type *interface_type = ptr_type->deref;
1868 while (interface_type->base_type == vtn_base_type_array)
1869 interface_type = interface_type->array_element;
1870
1871 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
1872 nir_variable_mode nir_mode;
1873 ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
1874 interface_type, &nir_mode);
1875 ptr->type = ptr_type->deref;
1876 ptr->ptr_type = ptr_type;
1877
1878 if (b->wa_glslang_179) {
1879 /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
1880 * need to whack the mode because it creates a function parameter with
1881 * the Function storage class even though it's a pointer to a sampler.
1882 * If we don't do this, then NIR won't get rid of the deref_cast for us.
1883 */
1884 if (ptr->mode == vtn_variable_mode_function &&
1885 (ptr->type->base_type == vtn_base_type_sampler ||
1886 ptr->type->base_type == vtn_base_type_sampled_image)) {
1887 ptr->mode = vtn_variable_mode_uniform;
1888 nir_mode = nir_var_uniform;
1889 }
1890 }
1891
1892 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1893 /* This pointer type needs to have actual storage */
1894 vtn_assert(ptr_type->type);
1895 if (ptr->mode == vtn_variable_mode_ubo ||
1896 ptr->mode == vtn_variable_mode_ssbo) {
1897 vtn_assert(ssa->num_components == 2);
1898 ptr->block_index = nir_channel(&b->nb, ssa, 0);
1899 ptr->offset = nir_channel(&b->nb, ssa, 1);
1900 } else {
1901 vtn_assert(ssa->num_components == 1);
1902 ptr->block_index = NULL;
1903 ptr->offset = ssa;
1904 }
1905 } else {
1906 const struct glsl_type *deref_type = ptr_type->deref->type;
1907 if (!vtn_pointer_is_external_block(b, ptr)) {
1908 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1909 deref_type, 0);
1910 } else if (vtn_type_contains_block(b, ptr->type) &&
1911 ptr->mode != vtn_variable_mode_phys_ssbo) {
1912 /* This is a pointer to somewhere in an array of blocks, not a
1913 * pointer to somewhere inside the block. Set the block index
1914 * instead of making a cast.
1915 */
1916 ptr->block_index = ssa;
1917 } else {
1918 /* This is a pointer to something internal or a pointer inside a
1919 * block. It's just a regular cast.
1920 *
1921 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1922 * at all because we get the pointer directly from the client. This
1923 * assumes that there will never be an SSBO binding variable using the
1924 * PhysicalStorageBufferEXT storage class. This assumption appears
1925 * to be correct according to the Vulkan spec because, per the table
1926 * "Shader Resource and Storage Class Correspondence," only the
1927 * Uniform storage class with BufferBlock or the StorageBuffer
1928 * storage class with Block can be used for SSBO bindings.
1929 */
1930 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1931 ptr_type->deref->type,
1932 ptr_type->stride);
1933 ptr->deref->dest.ssa.num_components =
1934 glsl_get_vector_elements(ptr_type->type);
1935 ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
1936 }
1937 }
1938
1939 return ptr;
1940 }
1941
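/* Returns true if the variable is an implicitly-arrayed per-vertex input
 * (tessellation control/evaluation or geometry) or per-vertex output
 * (tessellation control only), i.e. a non-patch array whose outermost
 * dimension is the per-vertex dimension.
 */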
1942 static bool
1943 is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
1944 {
1945 if (var->patch || !glsl_type_is_array(var->type->type))
1946 return false;
1947
1948 if (var->mode == vtn_variable_mode_input) {
1949 return stage == MESA_SHADER_TESS_CTRL ||
1950 stage == MESA_SHADER_TESS_EVAL ||
1951 stage == MESA_SHADER_GEOMETRY;
1952 }
1953
1954 if (var->mode == vtn_variable_mode_output)
1955 return stage == MESA_SHADER_TESS_CTRL;
1956
1957 return false;
1958 }
1959
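/* Implements the Vulkan rule quoted below: any member without an explicit
 * Location gets the location immediately after the preceding member.  For
 * example (hypothetical members), a block whose members are a vec4 decorated
 * Location 3, a mat4 with no Location, and a vec2 with no Location ends up
 * with member locations 3, 4, and 8, since the vec4 consumes one slot and
 * the mat4 consumes four.
 */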
1960 static void
1961 assign_missing_member_locations(struct vtn_variable *var)
1962 {
1963 unsigned length =
1964 glsl_get_length(glsl_without_array(var->type->type));
1965 int location = var->base_location;
1966
1967 for (unsigned i = 0; i < length; i++) {
1968 /* From the Vulkan spec:
1969 *
1970 * “If the structure type is a Block but without a Location, then each
1971 * of its members must have a Location decoration.”
1972 *
1973 */
1974 if (var->type->block) {
1975 assert(var->base_location != -1 ||
1976 var->var->members[i].location != -1);
1977 }
1978
1979 /* From the Vulkan spec:
1980 *
1981 * “Any member with its own Location decoration is assigned that
1982 * location. Each remaining member is assigned the location after the
1983 * immediately preceding member in declaration order.”
1984 */
1985 if (var->var->members[i].location != -1)
1986 location = var->var->members[i].location;
1987 else
1988 var->var->members[i].location = location;
1989
1990 /* Below we use type instead of interface_type, because interface_type
1991 * is only available when it is a Block. This code also supports
1992 * input/outputs that are just structs
1993 */
1994 const struct glsl_type *member_type =
1995 glsl_get_struct_field(glsl_without_array(var->type->type), i);
1996
1997 location +=
1998 glsl_count_attribute_slots(member_type,
1999 false /* is_gl_vertex_input */);
2000 }
2001 }
2002
2003
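/* Handles OpVariable: derives the variable mode from the storage class,
 * tallies resource counts in shader_info, creates the vtn_variable and
 * (for modes that need one) the backing nir_variable, applies decorations,
 * and finally registers the variable with the current function or shader.
 */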
2004 static void
2005 vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
2006 struct vtn_type *ptr_type, SpvStorageClass storage_class,
2007 nir_constant *initializer)
2008 {
2009 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
2010 struct vtn_type *type = ptr_type->deref;
2011
2012 struct vtn_type *without_array = type;
2013 while (glsl_type_is_array(without_array->type))
2014 without_array = without_array->array_element;
2015
2016 enum vtn_variable_mode mode;
2017 nir_variable_mode nir_mode;
2018 mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);
2019
2020 switch (mode) {
2021 case vtn_variable_mode_ubo:
2022 /* There's no other way to get vtn_variable_mode_ubo */
2023 vtn_assert(without_array->block);
2024 b->shader->info.num_ubos++;
2025 break;
2026 case vtn_variable_mode_ssbo:
2027 if (storage_class == SpvStorageClassStorageBuffer &&
2028 !without_array->block) {
2029 if (b->variable_pointers) {
2030 vtn_fail("Variables in the StorageBuffer storage class must "
2031 "have a struct type with the Block decoration");
2032 } else {
2033 /* If variable pointers are not present, it's still malformed
2034 * SPIR-V but we can parse it and do the right thing anyway.
2035 * Since some of the 8-bit storage tests have bugs in this area,
2036 * just make it a warning for now.
2037 */
2038 vtn_warn("Variables in the StorageBuffer storage class must "
2039 "have a struct type with the Block decoration");
2040 }
2041 }
2042 b->shader->info.num_ssbos++;
2043 break;
2044 case vtn_variable_mode_uniform:
2045 if (glsl_type_is_image(without_array->type))
2046 b->shader->info.num_images++;
2047 else if (glsl_type_is_sampler(without_array->type))
2048 b->shader->info.num_textures++;
2049 break;
2050 case vtn_variable_mode_push_constant:
2051 b->shader->num_uniforms = vtn_type_block_size(b, type);
2052 break;
2053
2054 case vtn_variable_mode_phys_ssbo:
2055 vtn_fail("Cannot create a variable with the "
2056 "PhysicalStorageBufferEXT storage class");
2057 break;
2058
2059 default:
2060 /* No tallying is needed */
2061 break;
2062 }
2063
2064 struct vtn_variable *var = rzalloc(b, struct vtn_variable);
2065 var->type = type;
2066 var->mode = mode;
2067 var->base_location = -1;
2068
2069 vtn_assert(val->value_type == vtn_value_type_pointer);
2070 val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
2071
2072 switch (var->mode) {
2073 case vtn_variable_mode_function:
2074 case vtn_variable_mode_private:
2075 case vtn_variable_mode_uniform:
2076 /* For these, we create the variable normally */
2077 var->var = rzalloc(b->shader, nir_variable);
2078 var->var->name = ralloc_strdup(var->var, val->name);
2079
2080 if (storage_class == SpvStorageClassAtomicCounter) {
2081 /* We need to tweak the NIR type here because at vtn_handle_type()
2082 * we don't have access to the storage class, which is what tells
2083 * us that this is actually an atomic uint.
2084 */
2085 var->var->type = repair_atomic_type(var->type->type);
2086 } else {
2087 /* Private variables don't have any explicit layout but some layouts
2088 * may have leaked through due to type deduplication in the SPIR-V.
2089 */
2090 var->var->type = var->type->type;
2091 }
2092 var->var->data.mode = nir_mode;
2093 var->var->data.location = -1;
2094 var->var->interface_type = NULL;
2095 break;
2096
2097 case vtn_variable_mode_workgroup:
2098 if (b->options->lower_workgroup_access_to_offsets) {
2099 var->shared_location = -1;
2100 } else {
2101 /* Create the variable normally */
2102 var->var = rzalloc(b->shader, nir_variable);
2103 var->var->name = ralloc_strdup(var->var, val->name);
2104 /* Workgroup variables don't have any explicit layout but some
2105 * layouts may have leaked through due to type deduplication in the
2106 * SPIR-V.
2107 */
2108 var->var->type = var->type->type;
2109 var->var->data.mode = nir_var_mem_shared;
2110 }
2111 break;
2112
2113 case vtn_variable_mode_input:
2114 case vtn_variable_mode_output: {
2115 /* In order to know whether or not we're a per-vertex inout, we need
2116 * the patch qualifier. This means walking the variable decorations
2117 * early before we actually create any variables. Not a big deal.
2118 *
2119 * GLSLang really likes to place decorations in the most interior
2120 * thing it possibly can. In particular, if you have a struct, it
2121 * will place the patch decorations on the struct members. This
2122 * should be handled by the variable splitting below just fine.
2123 *
2124 * If you have an array-of-struct, things get even more weird as it
2125 * will place the patch decorations on the struct even though it's
2126 * inside an array and some of the members being patch and others not
2127 * makes no sense whatsoever. Since the only sensible thing is for
2128 * it to be all or nothing, we'll call it patch if any of the members
2129 * are declared patch.
2130 */
2131 var->patch = false;
2132 vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
2133 if (glsl_type_is_array(var->type->type) &&
2134 glsl_type_is_struct_or_ifc(without_array->type)) {
2135 vtn_foreach_decoration(b, vtn_value(b, without_array->id,
2136 vtn_value_type_type),
2137 var_is_patch_cb, &var->patch);
2138 }
2139
2140 /* For inputs and outputs, we immediately split structures. This
2141 * is for a couple of reasons. For one, builtins may all come in
2142 * a struct and we really want those split out into separate
2143 * variables. For another, interpolation qualifiers can be
2144 * applied to members of the top-level struct and we need to be
2145 * able to preserve that information.
2146 */
2147
2148 struct vtn_type *per_vertex_type = var->type;
2149 if (is_per_vertex_inout(var, b->shader->info.stage)) {
2150 /* In Geometry shaders (and some tessellation), inputs come
2151 * in per-vertex arrays. However, some builtins come in
2152 * non-per-vertex, hence the need for the is_array check. In
2153 * any case, there are no non-builtin arrays allowed so this
2154 * check should be sufficient.
2155 */
2156 per_vertex_type = var->type->array_element;
2157 }
2158
2159 var->var = rzalloc(b->shader, nir_variable);
2160 var->var->name = ralloc_strdup(var->var, val->name);
2161 /* In Vulkan, shader I/O variables don't have any explicit layout but
2162 * some layouts may have leaked through due to type deduplication in
2163 * the SPIR-V. We do, however, keep the layouts in the variable's
2164 * interface_type because we need offsets for XFB arrays of blocks.
2165 */
2166 var->var->type = var->type->type;
2167 var->var->data.mode = nir_mode;
2168 var->var->data.patch = var->patch;
2169
2170 /* Figure out the interface block type. */
2171 struct vtn_type *iface_type = per_vertex_type;
2172 if (var->mode == vtn_variable_mode_output &&
2173 (b->shader->info.stage == MESA_SHADER_VERTEX ||
2174 b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
2175 b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
2176 /* For vertex data outputs, we can end up with arrays of blocks for
2177 * transform feedback where each array element corresponds to a
2178 * different XFB output buffer.
2179 */
2180 while (iface_type->base_type == vtn_base_type_array)
2181 iface_type = iface_type->array_element;
2182 }
2183 if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
2184 var->var->interface_type = iface_type->type;
2185
2186 if (per_vertex_type->base_type == vtn_base_type_struct &&
2187 per_vertex_type->block) {
2188 /* It's a struct. Set it up as per-member. */
2189 var->var->num_members = glsl_get_length(per_vertex_type->type);
2190 var->var->members = rzalloc_array(var->var, struct nir_variable_data,
2191 var->var->num_members);
2192
2193 for (unsigned i = 0; i < var->var->num_members; i++) {
2194 var->var->members[i].mode = nir_mode;
2195 var->var->members[i].patch = var->patch;
2196 var->var->members[i].location = -1;
2197 }
2198 }
2199
2200 /* For inputs and outputs, we need to grab locations and builtin
2201 * information from the per-vertex type.
2202 */
2203 vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
2204 vtn_value_type_type),
2205 var_decoration_cb, var);
2206 break;
2207 }
2208
2209 case vtn_variable_mode_ubo:
2210 case vtn_variable_mode_ssbo:
2211 case vtn_variable_mode_push_constant:
2212 case vtn_variable_mode_cross_workgroup:
2213 /* These don't need actual variables. */
2214 break;
2215
2216 case vtn_variable_mode_phys_ssbo:
2217 unreachable("Should have been caught before");
2218 }
2219
2220 if (initializer) {
2221 var->var->constant_initializer =
2222 nir_constant_clone(initializer, var->var);
2223 }
2224
2225 vtn_foreach_decoration(b, val, var_decoration_cb, var);
2226 vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
2227
2228 if ((var->mode == vtn_variable_mode_input ||
2229 var->mode == vtn_variable_mode_output) &&
2230 var->var->members) {
2231 assign_missing_member_locations(var);
2232 }
2233
2234 if (var->mode == vtn_variable_mode_uniform) {
2235 /* XXX: We still need the binding information in the nir_variable
2236 * for these. We should fix that.
2237 */
2238 var->var->data.binding = var->binding;
2239 var->var->data.explicit_binding = var->explicit_binding;
2240 var->var->data.descriptor_set = var->descriptor_set;
2241 var->var->data.index = var->input_attachment_index;
2242 var->var->data.offset = var->offset;
2243
2244 if (glsl_type_is_image(without_array->type))
2245 var->var->data.image.format = without_array->image_format;
2246 }
2247
2248 if (var->mode == vtn_variable_mode_function) {
2249 vtn_assert(var->var != NULL && var->var->members == NULL);
2250 nir_function_impl_add_variable(b->nb.impl, var->var);
2251 } else if (var->var) {
2252 nir_shader_add_variable(b->shader, var->var);
2253 } else {
2254 vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
2255 }
2256 }
2257
2258 static void
2259 vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
2260 struct vtn_type *dst_type,
2261 struct vtn_type *src_type)
2262 {
2263 if (dst_type->id == src_type->id)
2264 return;
2265
2266 if (vtn_types_compatible(b, dst_type, src_type)) {
2267 /* Early versions of GLSLang would re-emit types unnecessarily and you
2268 * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
2269 * mismatched source and destination types.
2270 *
2271 * https://github.com/KhronosGroup/glslang/issues/304
2272 * https://github.com/KhronosGroup/glslang/issues/307
2273 * https://bugs.freedesktop.org/show_bug.cgi?id=104338
2274 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2275 */
2276 vtn_warn("Source and destination types of %s do not have the same "
2277 "ID (but are compatible): %u vs %u",
2278 spirv_op_to_string(opcode), dst_type->id, src_type->id);
2279 return;
2280 }
2281
2282 vtn_fail("Source and destination types of %s do not match: %s vs. %s",
2283 spirv_op_to_string(opcode),
2284 glsl_get_type_name(dst_type->type),
2285 glsl_get_type_name(src_type->type));
2286 }
2287
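/* Resizes a vector to exactly num_components, dropping trailing components
 * or appending zero components as needed.
 */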
2288 static nir_ssa_def *
2289 nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
2290 unsigned num_components)
2291 {
2292 if (val->num_components == num_components)
2293 return val;
2294
2295 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
2296 for (unsigned i = 0; i < num_components; i++) {
2297 if (i < val->num_components)
2298 comps[i] = nir_channel(b, val, i);
2299 else
2300 comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
2301 }
2302 return nir_vec(b, comps, num_components);
2303 }
2304
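/* Bit-casts a value to the vector shape of the given type, zero-padding or
 * truncating as needed rather than requiring the bit sizes to match up
 * exactly.  For example, casting a single 64-bit value to a uvec2 simply
 * re-interprets it as two 32-bit components, while casting a uvec2 to a
 * 64-bit scalar fuses the two components.
 */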
2305 static nir_ssa_def *
2306 nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
2307 const struct glsl_type *type)
2308 {
2309 const unsigned num_components = glsl_get_vector_elements(type);
2310 const unsigned bit_size = glsl_get_bit_size(type);
2311
2312 /* First, zero-pad to ensure that the value is big enough that when we
2313 * bit-cast it, we don't loose anything.
2314 */
2315 if (val->bit_size < bit_size) {
2316 const unsigned src_num_components_needed =
2317 vtn_align_u32(val->num_components, bit_size / val->bit_size);
2318 val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
2319 }
2320
2321 val = nir_bitcast_vector(b, val, bit_size);
2322
2323 return nir_shrink_zero_pad_vec(b, val, num_components);
2324 }
2325
2326 void
2327 vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
2328 const uint32_t *w, unsigned count)
2329 {
2330 switch (opcode) {
2331 case SpvOpUndef: {
2332 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
2333 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
2334 break;
2335 }
2336
2337 case SpvOpVariable: {
2338 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2339
2340 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
2341
2342 SpvStorageClass storage_class = w[3];
2343 nir_constant *initializer = NULL;
2344 if (count > 4)
2345 initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;
2346
2347 vtn_create_variable(b, val, ptr_type, storage_class, initializer);
2348 break;
2349 }
2350
2351 case SpvOpAccessChain:
2352 case SpvOpPtrAccessChain:
2353 case SpvOpInBoundsAccessChain:
2354 case SpvOpInBoundsPtrAccessChain: {
2355 struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
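      /* For OpPtrAccessChain and OpInBoundsPtrAccessChain, the first index
       * (the Element operand) indexes the base pointer itself as though it
       * pointed into an array, rather than selecting within the pointee.
       */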
2356 chain->ptr_as_array = (opcode == SpvOpPtrAccessChain || opcode == SpvOpInBoundsPtrAccessChain);
2357
2358 unsigned idx = 0;
2359 for (int i = 4; i < count; i++) {
2360 struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
2361 if (link_val->value_type == vtn_value_type_constant) {
2362 chain->link[idx].mode = vtn_access_mode_literal;
2363 const unsigned bit_size = glsl_get_bit_size(link_val->type->type);
2364 switch (bit_size) {
2365 case 8:
2366 chain->link[idx].id = link_val->constant->values[0][0].i8;
2367 break;
2368 case 16:
2369 chain->link[idx].id = link_val->constant->values[0][0].i16;
2370 break;
2371 case 32:
2372 chain->link[idx].id = link_val->constant->values[0][0].i32;
2373 break;
2374 case 64:
2375 chain->link[idx].id = link_val->constant->values[0][0].i64;
2376 break;
2377 default:
2378 vtn_fail("Invalid bit size: %u", bit_size);
2379 }
2380 } else {
2381 chain->link[idx].mode = vtn_access_mode_id;
2382 chain->link[idx].id = w[i];
2383
2384 }
2385 idx++;
2386 }
2387
2388 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2389 struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
2390 if (base_val->value_type == vtn_value_type_sampled_image) {
2391 /* This is rather insane. SPIR-V allows you to use OpSampledImage
2392 * to combine an array of images with a single sampler to get an
2393 * array of sampled images that all share the same sampler.
2394 * Fortunately, this means that we can more-or-less ignore the
2395 * sampler when crawling the access chain, but it does leave us
2396 * with this rather awkward little special-case.
2397 */
2398 struct vtn_value *val =
2399 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
2400 val->sampled_image = ralloc(b, struct vtn_sampled_image);
2401 val->sampled_image->type = base_val->sampled_image->type;
2402 val->sampled_image->image =
2403 vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
2404 val->sampled_image->sampler = base_val->sampled_image->sampler;
2405 vtn_foreach_decoration(b, val, ptr_decoration_cb,
2406 val->sampled_image->image);
2407 vtn_foreach_decoration(b, val, ptr_decoration_cb,
2408 val->sampled_image->sampler);
2409 } else {
2410 vtn_assert(base_val->value_type == vtn_value_type_pointer);
2411 struct vtn_value *val =
2412 vtn_push_value(b, w[2], vtn_value_type_pointer);
2413 val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
2414 val->pointer->ptr_type = ptr_type;
2415 vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
2416 }
2417 break;
2418 }
2419
2420 case SpvOpCopyMemory: {
2421 struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
2422 struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);
2423
2424 vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);
2425
2426 vtn_variable_copy(b, dest->pointer, src->pointer);
2427 break;
2428 }
2429
2430 case SpvOpLoad: {
2431 struct vtn_type *res_type =
2432 vtn_value(b, w[1], vtn_value_type_type)->type;
2433 struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
2434 struct vtn_pointer *src = src_val->pointer;
2435
2436 vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
2437
2438 if (glsl_type_is_image(res_type->type) ||
2439 glsl_type_is_sampler(res_type->type)) {
2440 vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
2441 return;
2442 }
2443
2444 vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
2445 break;
2446 }
2447
2448 case SpvOpStore: {
2449 struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
2450 struct vtn_pointer *dest = dest_val->pointer;
2451 struct vtn_value *src_val = vtn_untyped_value(b, w[2]);
2452
2453 /* OpStore requires us to actually have a storage type */
2454 vtn_fail_if(dest->type->type == NULL,
2455 "Invalid destination type for OpStore");
2456
2457 if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
2458 glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
2459 /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
2460 * would then store them to a local variable as bool. Work around
2461 * the issue by doing an implicit conversion.
2462 *
2463 * https://github.com/KhronosGroup/glslang/issues/170
2464 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2465 */
2466 vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
2467 "OpTypeBool. Doing an implicit conversion to work around "
2468 "the problem.");
2469 struct vtn_ssa_value *bool_ssa =
2470 vtn_create_ssa_value(b, dest->type->type);
2471 bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
2472 vtn_variable_store(b, bool_ssa, dest);
2473 break;
2474 }
2475
2476 vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
2477
2478 if (glsl_type_is_sampler(dest->type->type)) {
2479 if (b->wa_glslang_179) {
2480 vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
2481 "propagation to workaround the problem.");
2482 vtn_assert(dest->var->copy_prop_sampler == NULL);
2483 dest->var->copy_prop_sampler =
2484 vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
2485 } else {
2486 vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
2487 }
2488 break;
2489 }
2490
2491 struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
2492 vtn_variable_store(b, src, dest);
2493 break;
2494 }
2495
2496 case SpvOpArrayLength: {
2497 struct vtn_pointer *ptr =
2498 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2499 const uint32_t field = w[4];
2500
2501 vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
2502 "OpArrayLength must take a pointer to a structure type");
2503 vtn_fail_if(field != ptr->type->length - 1 ||
2504 ptr->type->members[field]->base_type != vtn_base_type_array,
2505 "OpArrayLength must reference the last memeber of the "
2506 "structure and that must be an array");
2507
2508 const uint32_t offset = ptr->type->offsets[field];
2509 const uint32_t stride = ptr->type->members[field]->stride;
2510
2511 if (!ptr->block_index) {
2512 struct vtn_access_chain chain = {
2513 .length = 0,
2514 };
2515 ptr = vtn_pointer_dereference(b, ptr, &chain);
2516 vtn_assert(ptr->block_index);
2517 }
2518
2519 nir_intrinsic_instr *instr =
2520 nir_intrinsic_instr_create(b->nb.shader,
2521 nir_intrinsic_get_buffer_size);
2522 instr->src[0] = nir_src_for_ssa(ptr->block_index);
2523 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
2524 nir_builder_instr_insert(&b->nb, &instr->instr);
2525 nir_ssa_def *buf_size = &instr->dest.ssa;
2526
2527 /* array_length = max(buffer_size - offset, 0) / stride */
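      /* For example (hypothetical numbers), a runtime array with a 16-byte
       * stride at offset 16 in a 256-byte buffer yields (256 - 16) / 16 = 15
       * elements.
       */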
2528 nir_ssa_def *array_length =
2529 nir_idiv(&b->nb,
2530 nir_imax(&b->nb,
2531 nir_isub(&b->nb,
2532 buf_size,
2533 nir_imm_int(&b->nb, offset)),
2534 nir_imm_int(&b->nb, 0u)),
2535 nir_imm_int(&b->nb, stride));
2536
2537 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2538 val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
2539 val->ssa->def = array_length;
2540 break;
2541 }
2542
2543 case SpvOpConvertPtrToU: {
2544 struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2545
2546 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2547 u_val->type->base_type != vtn_base_type_scalar,
2548 "OpConvertPtrToU can only be used to cast to a vector or "
2549 "scalar type");
2550
2551 /* The pointer will be converted to an SSA value automatically */
2552 nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;
2553
2554 u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
2555 u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
2556 break;
2557 }
2558
2559 case SpvOpConvertUToPtr: {
2560 struct vtn_value *ptr_val =
2561 vtn_push_value(b, w[2], vtn_value_type_pointer);
2562 struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);
2563
2564 vtn_fail_if(ptr_val->type->type == NULL,
2565 "OpConvertUToPtr can only be used on physical pointers");
2566
2567 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2568 u_val->type->base_type != vtn_base_type_scalar,
2569 "OpConvertUToPtr can only be used to cast from a vector or "
2570 "scalar type");
2571
2572 nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
2573 ptr_val->type->type);
2574 ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
2575 break;
2576 }
2577
2578 case SpvOpCopyMemorySized:
2579 default:
2580 vtn_fail_with_opcode("Unhandled opcode", opcode);
2581 }
2582 }