spirv: Reuse helpers in vtn_handle_type()
[mesa.git] / src / compiler / spirv / vtn_variables.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "spirv_info.h"
30 #include "nir_deref.h"
31 #include <vulkan/vulkan_core.h>
32
33 static struct vtn_access_chain *
34 vtn_access_chain_create(struct vtn_builder *b, unsigned length)
35 {
36 struct vtn_access_chain *chain;
37
38 /* Subtract 1 from the length since there's already one built in */
39 size_t size = sizeof(*chain) +
40 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
41 chain = rzalloc_size(b, size);
42 chain->length = length;
43
44 return chain;
45 }
46
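/* Descriptive note (added for clarity): returns true when pointers of the
 * given mode are lowered to an explicit (block index, byte offset) pair
 * rather than a NIR deref chain.  As a rough sketch, a UBO access like
 * "ubo.member[i]" then becomes a vulkan_resource_index intrinsic plus an
 * SSA byte offset (member_offset + i * stride), which is what
 * vtn_ssa_offset_pointer_dereference below computes.
 */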
47 bool
48 vtn_mode_uses_ssa_offset(struct vtn_builder *b,
49 enum vtn_variable_mode mode)
50 {
51 return ((mode == vtn_variable_mode_ubo ||
52 mode == vtn_variable_mode_ssbo) &&
53 b->options->lower_ubo_ssbo_access_to_offsets) ||
54 mode == vtn_variable_mode_push_constant ||
55 (mode == vtn_variable_mode_workgroup &&
56 b->options->lower_workgroup_access_to_offsets);
57 }
58
59 static bool
60 vtn_pointer_is_external_block(struct vtn_builder *b,
61 struct vtn_pointer *ptr)
62 {
63 return ptr->mode == vtn_variable_mode_ssbo ||
64 ptr->mode == vtn_variable_mode_ubo ||
65 ptr->mode == vtn_variable_mode_phys_ssbo ||
66 ptr->mode == vtn_variable_mode_push_constant ||
67 (ptr->mode == vtn_variable_mode_workgroup &&
68 b->options->lower_workgroup_access_to_offsets);
69 }
70
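/* Illustrative note: an access-chain link is either a literal or an SSA
 * value.  For a literal link with id 3 and a stride of 16 this helper simply
 * emits the constant 48; for an SSA link it converts the value to the
 * requested bit size (if needed) and multiplies it by the stride.
 */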
71 static nir_ssa_def *
72 vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
73 unsigned stride, unsigned bit_size)
74 {
75 vtn_assert(stride > 0);
76 if (link.mode == vtn_access_mode_literal) {
77 return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
78 } else {
79 nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
80 if (ssa->bit_size != bit_size)
81 ssa = nir_i2i(&b->nb, ssa, bit_size);
82 return nir_imul_imm(&b->nb, ssa, stride);
83 }
84 }
85
86 static VkDescriptorType
87 vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
88 {
89 switch (mode) {
90 case vtn_variable_mode_ubo:
91 return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
92 case vtn_variable_mode_ssbo:
93 return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
94 default:
95 vtn_fail("Invalid mode for vulkan_resource_index");
96 }
97 }
98
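/* Sketch (names hypothetical): for a declaration such as
 *
 *    layout(set = 0, binding = 2) buffer Data { ... } data[4];
 *
 * dereferencing data[i] emits a vulkan_resource_index intrinsic with
 * desc_set = 0, binding = 2 and i as the descriptor array index; a NULL
 * desc_array_index is treated as index 0.
 */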
99 static nir_ssa_def *
100 vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
101 nir_ssa_def *desc_array_index)
102 {
103 if (!desc_array_index) {
104 vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
105 desc_array_index = nir_imm_int(&b->nb, 0);
106 }
107
108 nir_intrinsic_instr *instr =
109 nir_intrinsic_instr_create(b->nb.shader,
110 nir_intrinsic_vulkan_resource_index);
111 instr->src[0] = nir_src_for_ssa(desc_array_index);
112 nir_intrinsic_set_desc_set(instr, var->descriptor_set);
113 nir_intrinsic_set_binding(instr, var->binding);
114 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
115
116 vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
117 var->mode != vtn_variable_mode_ssbo,
118 "Invalid mode for vulkan_resource_index");
119
120 nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
121 const struct glsl_type *index_type =
122 b->options->lower_ubo_ssbo_access_to_offsets ?
123 glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);
124
125 instr->num_components = glsl_get_vector_elements(index_type);
126 nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
127 glsl_get_bit_size(index_type), NULL);
128 nir_builder_instr_insert(&b->nb, &instr->instr);
129
130 return &instr->dest.ssa;
131 }
132
133 static nir_ssa_def *
134 vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
135 nir_ssa_def *base_index, nir_ssa_def *offset_index)
136 {
137 nir_intrinsic_instr *instr =
138 nir_intrinsic_instr_create(b->nb.shader,
139 nir_intrinsic_vulkan_resource_reindex);
140 instr->src[0] = nir_src_for_ssa(base_index);
141 instr->src[1] = nir_src_for_ssa(offset_index);
142 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
143
144 vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
145 "Invalid mode for vulkan_resource_reindex");
146
147 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
148 const struct glsl_type *index_type =
149 b->options->lower_ubo_ssbo_access_to_offsets ?
150 glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);
151
152 instr->num_components = glsl_get_vector_elements(index_type);
153 nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
154 glsl_get_bit_size(index_type), NULL);
155 nir_builder_instr_insert(&b->nb, &instr->instr);
156
157 return &instr->dest.ssa;
158 }
159
160 static nir_ssa_def *
161 vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
162 nir_ssa_def *desc_index)
163 {
164 nir_intrinsic_instr *desc_load =
165 nir_intrinsic_instr_create(b->nb.shader,
166 nir_intrinsic_load_vulkan_descriptor);
167 desc_load->src[0] = nir_src_for_ssa(desc_index);
168 nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
169
170 vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
171 "Invalid mode for load_vulkan_descriptor");
172
173 nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
174 const struct glsl_type *ptr_type =
175 nir_address_format_to_glsl_type(addr_format);
176
177 desc_load->num_components = glsl_get_vector_elements(ptr_type);
178 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
179 desc_load->num_components,
180 glsl_get_bit_size(ptr_type), NULL);
181 nir_builder_instr_insert(&b->nb, &desc_load->instr);
182
183 return &desc_load->dest.ssa;
184 }
185
186 /* Dereference the given base pointer by the access chain */
187 static struct vtn_pointer *
188 vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
189 struct vtn_pointer *base,
190 struct vtn_access_chain *deref_chain)
191 {
192 struct vtn_type *type = base->type;
193 enum gl_access_qualifier access = base->access;
194 unsigned idx = 0;
195
196 nir_deref_instr *tail;
197 if (base->deref) {
198 tail = base->deref;
199 } else if (vtn_pointer_is_external_block(b, base)) {
200 nir_ssa_def *block_index = base->block_index;
201
202 /* We're dereferencing an external block pointer. Correctness of this
203 * operation relies on one particular line in the SPIR-V spec, section
204 * entitled "Validation Rules for Shader Capabilities":
205 *
206 * "Block and BufferBlock decorations cannot decorate a structure
207 * type that is nested at any level inside another structure type
208 * decorated with Block or BufferBlock."
209 *
210 * This means that we can detect the point where we cross over from
211 * descriptor indexing to buffer indexing by looking for the block
212 * decorated struct type. Anything before the block decorated struct
213 * type is a descriptor indexing operation and anything after the block
214 * decorated struct is a buffer offset operation.
215 */
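/* Illustrative example (not from the spec): given
 *
 *    layout(set = 0, binding = 0) buffer Blk { vec4 data[]; } blocks[4];
 *
 * an access chain for blocks[i].data[j] splits at the Block-decorated
 * struct: blocks[i] is descriptor indexing handled here, while .data[j]
 * becomes part of the buffer deref chain built below.
 */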
216
217 /* Figure out the descriptor array index, if any.
218 *
219 * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
220 * to forget the Block or BufferBlock decoration from time to time.
221 * It's more robust if we check for both !block_index and for the type
222 * to contain a block. This way there's a decent chance that arrays of
223 * UBOs/SSBOs will work correctly even if variable pointers are
224 * completely toast.
225 */
226 nir_ssa_def *desc_arr_idx = NULL;
227 if (!block_index || vtn_type_contains_block(b, type)) {
228 /* If our type contains a block, then we're still outside the block
229 * and we need to process enough levels of dereferences to get inside
230 * of it.
231 */
232 if (deref_chain->ptr_as_array) {
233 unsigned aoa_size = glsl_get_aoa_size(type->type);
234 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
235 MAX2(aoa_size, 1), 32);
236 idx++;
237 }
238
239 for (; idx < deref_chain->length; idx++) {
240 if (type->base_type != vtn_base_type_array) {
241 vtn_assert(type->base_type == vtn_base_type_struct);
242 break;
243 }
244
245 unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
246 nir_ssa_def *arr_offset =
247 vtn_access_link_as_ssa(b, deref_chain->link[idx],
248 MAX2(aoa_size, 1), 32);
249 if (desc_arr_idx)
250 desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
251 else
252 desc_arr_idx = arr_offset;
253
254 type = type->array_element;
255 access |= type->access;
256 }
257 }
258
259 if (!block_index) {
260 vtn_assert(base->var && base->type);
261 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
262 } else if (desc_arr_idx) {
263 block_index = vtn_resource_reindex(b, base->mode,
264 block_index, desc_arr_idx);
265 }
266
267 if (idx == deref_chain->length) {
268 /* The entire deref was consumed in finding the block index. Return
269 * a pointer which just has a block index; a later access chain
270 * will dereference deeper.
271 */
272 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
273 ptr->mode = base->mode;
274 ptr->type = type;
275 ptr->block_index = block_index;
276 ptr->access = access;
277 return ptr;
278 }
279
280 /* If we got here, there's more access chain to handle and we have the
281 * final block index. Insert a descriptor load and cast to a deref to
282 * start the deref chain.
283 */
284 nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
285
286 assert(base->mode == vtn_variable_mode_ssbo ||
287 base->mode == vtn_variable_mode_ubo);
288 nir_variable_mode nir_mode =
289 base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
290
291 tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
292 base->ptr_type->stride);
293 } else {
294 assert(base->var && base->var->var);
295 tail = nir_build_deref_var(&b->nb, base->var->var);
296 if (base->ptr_type && base->ptr_type->type) {
297 tail->dest.ssa.num_components =
298 glsl_get_vector_elements(base->ptr_type->type);
299 tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
300 }
301 }
302
303 if (idx == 0 && deref_chain->ptr_as_array) {
304 /* We start with a deref cast to get the stride. Hopefully, we'll be
305 * able to delete that cast eventually.
306 */
307 tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
308 tail->type, base->ptr_type->stride);
309
310 nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
311 tail->dest.ssa.bit_size);
312 tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
313 idx++;
314 }
315
316 for (; idx < deref_chain->length; idx++) {
317 if (glsl_type_is_struct_or_ifc(type->type)) {
318 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
319 unsigned field = deref_chain->link[idx].id;
320 tail = nir_build_deref_struct(&b->nb, tail, field);
321 type = type->members[field];
322 } else {
323 nir_ssa_def *arr_index =
324 vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
325 tail->dest.ssa.bit_size);
326 tail = nir_build_deref_array(&b->nb, tail, arr_index);
327 type = type->array_element;
328 }
329
330 access |= type->access;
331 }
332
333 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
334 ptr->mode = base->mode;
335 ptr->type = type;
336 ptr->var = base->var;
337 ptr->deref = tail;
338 ptr->access = access;
339
340 return ptr;
341 }
342
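/* Rough sketch of what this computes: for UBO/SSBO pointers the leading
 * link(s) of the chain select the descriptor (block index); every following
 * link adds to an SSA byte offset, using type->offsets[member] for struct
 * members and link * type->stride for array/vector/matrix elements.
 */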
343 static struct vtn_pointer *
344 vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
345 struct vtn_pointer *base,
346 struct vtn_access_chain *deref_chain)
347 {
348 nir_ssa_def *block_index = base->block_index;
349 nir_ssa_def *offset = base->offset;
350 struct vtn_type *type = base->type;
351 enum gl_access_qualifier access = base->access;
352
353 unsigned idx = 0;
354 if (base->mode == vtn_variable_mode_ubo ||
355 base->mode == vtn_variable_mode_ssbo) {
356 if (!block_index) {
357 vtn_assert(base->var && base->type);
358 nir_ssa_def *desc_arr_idx;
359 if (glsl_type_is_array(type->type)) {
360 if (deref_chain->length >= 1) {
361 desc_arr_idx =
362 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
363 idx++;
364 /* This consumes one level of the type */
365 type = type->array_element;
366 access |= type->access;
367 } else {
368 /* This is annoying. We've been asked for a pointer to the
369 * array of UBOs/SSBOs and not a specific buffer. Return a
370 * pointer with a descriptor index of 0 and we'll have to do
371 * a reindex later to adjust it to the right thing.
372 */
373 desc_arr_idx = nir_imm_int(&b->nb, 0);
374 }
375 } else if (deref_chain->ptr_as_array) {
376 /* You can't have a zero-length OpPtrAccessChain */
377 vtn_assert(deref_chain->length >= 1);
378 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
379 } else {
380 /* We have a regular non-array SSBO. */
381 desc_arr_idx = NULL;
382 }
383 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
384 } else if (deref_chain->ptr_as_array &&
385 type->base_type == vtn_base_type_struct && type->block) {
386 /* We are doing an OpPtrAccessChain on a pointer to a struct that is
387 * decorated as a block. This is an interesting corner of the SPIR-V
388 * spec. One interpretation would be that the client is clearly
389 * trying to treat that block as if it's an implicit array of blocks
390 * repeated in the buffer. However, the SPIR-V spec for the
391 * OpPtrAccessChain says:
392 *
393 * "Base is treated as the address of the first element of an
394 * array, and the Element element’s address is computed to be the
395 * base for the Indexes, as per OpAccessChain."
396 *
397 * Taken literally, that would mean that your struct type is supposed
398 * to be treated as an array of such structs and, since it's
399 * decorated as a block, that means an array of blocks, which corresponds
400 * to an array descriptor. Therefore, we need to do a reindex
401 * operation to add the index from the first link in the access chain
402 * to the index we received.
403 *
404 * The downside to this interpretation (there always is one) is that
405 * this might be somewhat surprising behavior to apps if they expect
406 * the implicit array behavior described above.
407 */
408 vtn_assert(deref_chain->length >= 1);
409 nir_ssa_def *offset_index =
410 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
411 idx++;
412
413 block_index = vtn_resource_reindex(b, base->mode,
414 block_index, offset_index);
415 }
416 }
417
418 if (!offset) {
419 if (base->mode == vtn_variable_mode_workgroup) {
420 /* SLM neither needs nor has a block index */
421 vtn_assert(!block_index);
422
423 /* We need the variable for the base offset */
424 vtn_assert(base->var);
425
426 /* We need ptr_type for size and alignment */
427 vtn_assert(base->ptr_type);
428
429 /* Assign location on first use so that we don't end up bloating SLM
430 * address space for variables which are never statically used.
431 */
432 if (base->var->shared_location < 0) {
433 vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
434 b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
435 base->ptr_type->align);
436 base->var->shared_location = b->shader->num_shared;
437 b->shader->num_shared += base->ptr_type->length;
438 }
439
440 offset = nir_imm_int(&b->nb, base->var->shared_location);
441 } else if (base->mode == vtn_variable_mode_push_constant) {
442 /* Push constants neither need nor have a block index */
443 vtn_assert(!block_index);
444
445 /* Start at the beginning of the push constant block. */
446 offset = nir_imm_int(&b->nb, 0);
447 } else {
448 /* The code above should have ensured a block_index when needed. */
449 vtn_assert(block_index);
450
451 /* Start at the beginning of the buffer. */
452 offset = nir_imm_int(&b->nb, 0);
453 }
454 }
455
456 if (deref_chain->ptr_as_array && idx == 0) {
457 /* We need ptr_type for the stride */
458 vtn_assert(base->ptr_type);
459
460 /* We need at least one element in the chain */
461 vtn_assert(deref_chain->length >= 1);
462
463 nir_ssa_def *elem_offset =
464 vtn_access_link_as_ssa(b, deref_chain->link[idx],
465 base->ptr_type->stride, offset->bit_size);
466 offset = nir_iadd(&b->nb, offset, elem_offset);
467 idx++;
468 }
469
470 for (; idx < deref_chain->length; idx++) {
471 switch (glsl_get_base_type(type->type)) {
472 case GLSL_TYPE_UINT:
473 case GLSL_TYPE_INT:
474 case GLSL_TYPE_UINT16:
475 case GLSL_TYPE_INT16:
476 case GLSL_TYPE_UINT8:
477 case GLSL_TYPE_INT8:
478 case GLSL_TYPE_UINT64:
479 case GLSL_TYPE_INT64:
480 case GLSL_TYPE_FLOAT:
481 case GLSL_TYPE_FLOAT16:
482 case GLSL_TYPE_DOUBLE:
483 case GLSL_TYPE_BOOL:
484 case GLSL_TYPE_ARRAY: {
485 nir_ssa_def *elem_offset =
486 vtn_access_link_as_ssa(b, deref_chain->link[idx],
487 type->stride, offset->bit_size);
488 offset = nir_iadd(&b->nb, offset, elem_offset);
489 type = type->array_element;
490 access |= type->access;
491 break;
492 }
493
494 case GLSL_TYPE_INTERFACE:
495 case GLSL_TYPE_STRUCT: {
496 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
497 unsigned member = deref_chain->link[idx].id;
498 offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
499 type = type->members[member];
500 access |= type->access;
501 break;
502 }
503
504 default:
505 vtn_fail("Invalid type for deref");
506 }
507 }
508
509 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
510 ptr->mode = base->mode;
511 ptr->type = type;
512 ptr->block_index = block_index;
513 ptr->offset = offset;
514 ptr->access = access;
515
516 return ptr;
517 }
518
519 /* Dereference the given base pointer by the access chain */
520 static struct vtn_pointer *
521 vtn_pointer_dereference(struct vtn_builder *b,
522 struct vtn_pointer *base,
523 struct vtn_access_chain *deref_chain)
524 {
525 if (vtn_pointer_uses_ssa_offset(b, base)) {
526 return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
527 } else {
528 return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
529 }
530 }
531
532 struct vtn_pointer *
533 vtn_pointer_for_variable(struct vtn_builder *b,
534 struct vtn_variable *var, struct vtn_type *ptr_type)
535 {
536 struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);
537
538 pointer->mode = var->mode;
539 pointer->type = var->type;
540 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
541 vtn_assert(ptr_type->deref->type == var->type->type);
542 pointer->ptr_type = ptr_type;
543 pointer->var = var;
544 pointer->access = var->access | var->type->access;
545
546 return pointer;
547 }
548
549 /* Returns an atomic_uint type based on the original uint type. The returned
550 * type will be equivalent to the original one but will have an atomic_uint
551 * type as its leaf instead of a uint.
552 *
553 * Handles uint scalars, arrays, and arrays of arrays of any nesting depth.
554 */
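/* Example (illustrative): uint -> atomic_uint, and uint[4][2] ->
 * atomic_uint[4][2], preserving array lengths and explicit strides.
 */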
555 static const struct glsl_type *
556 repair_atomic_type(const struct glsl_type *type)
557 {
558 assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
559 assert(glsl_type_is_scalar(glsl_without_array(type)));
560
561 if (glsl_type_is_array(type)) {
562 const struct glsl_type *atomic =
563 repair_atomic_type(glsl_get_array_element(type));
564
565 return glsl_array_type(atomic, glsl_get_length(type),
566 glsl_get_explicit_stride(type));
567 } else {
568 return glsl_atomic_uint_type();
569 }
570 }
571
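/* Converts a deref-based vtn_pointer into a NIR deref instruction.  If the
 * pointer has not been dereferenced yet, this materializes the variable
 * deref (or descriptor load and cast, for external blocks) by walking an
 * empty access chain.  Descriptive comment only.
 */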
572 nir_deref_instr *
573 vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
574 {
575 if (b->wa_glslang_179) {
576 /* Do on-the-fly copy propagation for samplers. */
577 if (ptr->var && ptr->var->copy_prop_sampler)
578 return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
579 }
580
581 vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
582 if (!ptr->deref) {
583 struct vtn_access_chain chain = {
584 .length = 0,
585 };
586 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
587 }
588
589 return ptr->deref;
590 }
591
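/* Recursively loads or stores a vtn_ssa_value through a NIR deref:
 * vectors and scalars map to a single load_deref/store_deref, while arrays,
 * matrices and structs recurse one element at a time.  Descriptive comment
 * only.
 */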
592 static void
593 _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
594 struct vtn_ssa_value *inout,
595 enum gl_access_qualifier access)
596 {
597 if (glsl_type_is_vector_or_scalar(deref->type)) {
598 if (load) {
599 inout->def = nir_load_deref_with_access(&b->nb, deref, access);
600 } else {
601 nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
602 }
603 } else if (glsl_type_is_array(deref->type) ||
604 glsl_type_is_matrix(deref->type)) {
605 unsigned elems = glsl_get_length(deref->type);
606 for (unsigned i = 0; i < elems; i++) {
607 nir_deref_instr *child =
608 nir_build_deref_array_imm(&b->nb, deref, i);
609 _vtn_local_load_store(b, load, child, inout->elems[i], access);
610 }
611 } else {
612 vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
613 unsigned elems = glsl_get_length(deref->type);
614 for (unsigned i = 0; i < elems; i++) {
615 nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
616 _vtn_local_load_store(b, load, child, inout->elems[i], access);
617 }
618 }
619 }
620
621 nir_deref_instr *
622 vtn_nir_deref(struct vtn_builder *b, uint32_t id)
623 {
624 struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
625 return vtn_pointer_to_deref(b, ptr);
626 }
627
628 /*
629 * Gets the NIR-level deref tail. Because OpAccessChain supports
630 * per-component indexing in SPIR-V, the final deref may be an array deref
631 * selecting a single component of a vector, in which case we return its parent.
632 */
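/* Example (illustrative): for an access chain ending in v[2] where v is a
 * vec4, the tail returned is the deref of v itself; the callers below then
 * extract or insert component 2 explicitly.
 */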
633 static nir_deref_instr *
634 get_deref_tail(nir_deref_instr *deref)
635 {
636 if (deref->deref_type != nir_deref_type_array)
637 return deref;
638
639 nir_deref_instr *parent =
640 nir_instr_as_deref(deref->parent.ssa->parent_instr);
641
642 if (glsl_type_is_vector(parent->type))
643 return parent;
644 else
645 return deref;
646 }
647
648 struct vtn_ssa_value *
649 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
650 enum gl_access_qualifier access)
651 {
652 nir_deref_instr *src_tail = get_deref_tail(src);
653 struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
654 _vtn_local_load_store(b, true, src_tail, val, access);
655
656 if (src_tail != src) {
657 val->type = src->type;
658 if (nir_src_is_const(src->arr.index))
659 val->def = vtn_vector_extract(b, val->def,
660 nir_src_as_uint(src->arr.index));
661 else
662 val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
663 }
664
665 return val;
666 }
667
668 void
669 vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
670 nir_deref_instr *dest, enum gl_access_qualifier access)
671 {
672 nir_deref_instr *dest_tail = get_deref_tail(dest);
673
674 if (dest_tail != dest) {
675 struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
676 _vtn_local_load_store(b, true, dest_tail, val, access);
677
678 if (nir_src_is_const(dest->arr.index))
679 val->def = vtn_vector_insert(b, val->def, src->def,
680 nir_src_as_uint(dest->arr.index));
681 else
682 val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
683 dest->arr.index.ssa);
684 _vtn_local_load_store(b, false, dest_tail, val, access);
685 } else {
686 _vtn_local_load_store(b, false, dest_tail, src, access);
687 }
688 }
689
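/* Returns the SSA byte offset for an offset-based pointer and stores its
 * block index (NULL for push-constant and workgroup pointers) in *index_out.
 * Descriptive comment only.
 */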
690 nir_ssa_def *
691 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
692 nir_ssa_def **index_out)
693 {
694 assert(vtn_pointer_uses_ssa_offset(b, ptr));
695 if (!ptr->offset) {
696 struct vtn_access_chain chain = {
697 .length = 0,
698 };
699 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
700 }
701 *index_out = ptr->block_index;
702 return ptr->offset;
703 }
704
705 /* Tries to compute the size of an interface block based on the strides and
706 * offsets that are provided to us in the SPIR-V source.
707 */
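/* Worked example (illustrative): for a struct { vec3 a; float b; } with
 * member offsets {0, 12}, the size is MAX2(0 + 12, 12 + 4) = 16 bytes; for
 * an array it is stride * length, and for a matrix it is stride times the
 * number of columns (or rows, if row-major).
 */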
708 static unsigned
709 vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
710 {
711 enum glsl_base_type base_type = glsl_get_base_type(type->type);
712 switch (base_type) {
713 case GLSL_TYPE_UINT:
714 case GLSL_TYPE_INT:
715 case GLSL_TYPE_UINT16:
716 case GLSL_TYPE_INT16:
717 case GLSL_TYPE_UINT8:
718 case GLSL_TYPE_INT8:
719 case GLSL_TYPE_UINT64:
720 case GLSL_TYPE_INT64:
721 case GLSL_TYPE_FLOAT:
722 case GLSL_TYPE_FLOAT16:
723 case GLSL_TYPE_BOOL:
724 case GLSL_TYPE_DOUBLE: {
725 unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
726 glsl_get_matrix_columns(type->type);
727 if (cols > 1) {
728 vtn_assert(type->stride > 0);
729 return type->stride * cols;
730 } else {
731 unsigned type_size = glsl_get_bit_size(type->type) / 8;
732 return glsl_get_vector_elements(type->type) * type_size;
733 }
734 }
735
736 case GLSL_TYPE_STRUCT:
737 case GLSL_TYPE_INTERFACE: {
738 unsigned size = 0;
739 unsigned num_fields = glsl_get_length(type->type);
740 for (unsigned f = 0; f < num_fields; f++) {
741 unsigned field_end = type->offsets[f] +
742 vtn_type_block_size(b, type->members[f]);
743 size = MAX2(size, field_end);
744 }
745 return size;
746 }
747
748 case GLSL_TYPE_ARRAY:
749 vtn_assert(type->stride > 0);
750 vtn_assert(glsl_get_length(type->type) > 0);
751 return type->stride * glsl_get_length(type->type);
752
753 default:
754 vtn_fail("Invalid block type");
755 return 0;
756 }
757 }
758
759 static void
760 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
761 nir_ssa_def *index, nir_ssa_def *offset,
762 unsigned access_offset, unsigned access_size,
763 struct vtn_ssa_value **inout, const struct glsl_type *type,
764 enum gl_access_qualifier access)
765 {
766 nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
767 instr->num_components = glsl_get_vector_elements(type);
768
769 /* Booleans usually shouldn't show up in external memory in SPIR-V.
770 * However, they do for certain older GLSLang versions and can appear for
771 * shared memory when we lower access chains internally.
772 */
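/* (Sketch of the consequence: booleans are widened to 32 bits in memory
 * here, and loads convert them back to a NIR boolean with the "!= 0"
 * comparison at the end of this function.)
 */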
773 const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
774 glsl_get_bit_size(type);
775
776 int src = 0;
777 if (!load) {
778 nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
779 instr->src[src++] = nir_src_for_ssa((*inout)->def);
780 }
781
782 if (op == nir_intrinsic_load_push_constant) {
783 nir_intrinsic_set_base(instr, access_offset);
784 nir_intrinsic_set_range(instr, access_size);
785 }
786
787 if (op == nir_intrinsic_load_ubo ||
788 op == nir_intrinsic_load_ssbo ||
789 op == nir_intrinsic_store_ssbo) {
790 nir_intrinsic_set_access(instr, access);
791 }
792
793 /* With extensions like relaxed_block_layout, we really can't guarantee
794 * much more than scalar alignment.
795 */
796 if (op != nir_intrinsic_load_push_constant)
797 nir_intrinsic_set_align(instr, data_bit_size / 8, 0);
798
799 if (index)
800 instr->src[src++] = nir_src_for_ssa(index);
801
802 if (op == nir_intrinsic_load_push_constant) {
803 /* We need to subtract the offset from where the intrinsic will load the
804 * data. */
805 instr->src[src++] =
806 nir_src_for_ssa(nir_isub(&b->nb, offset,
807 nir_imm_int(&b->nb, access_offset)));
808 } else {
809 instr->src[src++] = nir_src_for_ssa(offset);
810 }
811
812 if (load) {
813 nir_ssa_dest_init(&instr->instr, &instr->dest,
814 instr->num_components, data_bit_size, NULL);
815 (*inout)->def = &instr->dest.ssa;
816 }
817
818 nir_builder_instr_insert(&b->nb, &instr->instr);
819
820 if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
821 (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
822 }
823
824 static void
825 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
826 nir_ssa_def *index, nir_ssa_def *offset,
827 unsigned access_offset, unsigned access_size,
828 struct vtn_type *type, enum gl_access_qualifier access,
829 struct vtn_ssa_value **inout)
830 {
831 if (load && *inout == NULL)
832 *inout = vtn_create_ssa_value(b, type->type);
833
834 enum glsl_base_type base_type = glsl_get_base_type(type->type);
835 switch (base_type) {
836 case GLSL_TYPE_UINT:
837 case GLSL_TYPE_INT:
838 case GLSL_TYPE_UINT16:
839 case GLSL_TYPE_INT16:
840 case GLSL_TYPE_UINT8:
841 case GLSL_TYPE_INT8:
842 case GLSL_TYPE_UINT64:
843 case GLSL_TYPE_INT64:
844 case GLSL_TYPE_FLOAT:
845 case GLSL_TYPE_FLOAT16:
846 case GLSL_TYPE_DOUBLE:
847 case GLSL_TYPE_BOOL:
848 /* This is where things get interesting. At this point, we've hit
849 * a vector, a scalar, or a matrix.
850 */
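/* Illustrative summary of the matrix path below: a column-major matrix is
 * loaded/stored one column at a time at offset + i * type->stride, while a
 * row-major matrix is loaded/stored one row at a time (rows separated by
 * the matrix stride) into a transposed temporary, with the result
 * transposed afterwards on load.
 */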
851 if (glsl_type_is_matrix(type->type)) {
852 /* Loading the whole matrix */
853 struct vtn_ssa_value *transpose;
854 unsigned num_ops, vec_width, col_stride;
855 if (type->row_major) {
856 num_ops = glsl_get_vector_elements(type->type);
857 vec_width = glsl_get_matrix_columns(type->type);
858 col_stride = type->array_element->stride;
859 if (load) {
860 const struct glsl_type *transpose_type =
861 glsl_matrix_type(base_type, vec_width, num_ops);
862 *inout = vtn_create_ssa_value(b, transpose_type);
863 } else {
864 transpose = vtn_ssa_transpose(b, *inout);
865 inout = &transpose;
866 }
867 } else {
868 num_ops = glsl_get_matrix_columns(type->type);
869 vec_width = glsl_get_vector_elements(type->type);
870 col_stride = type->stride;
871 }
872
873 for (unsigned i = 0; i < num_ops; i++) {
874 nir_ssa_def *elem_offset =
875 nir_iadd_imm(&b->nb, offset, i * col_stride);
876 _vtn_load_store_tail(b, op, load, index, elem_offset,
877 access_offset, access_size,
878 &(*inout)->elems[i],
879 glsl_vector_type(base_type, vec_width),
880 type->access | access);
881 }
882
883 if (load && type->row_major)
884 *inout = vtn_ssa_transpose(b, *inout);
885 } else {
886 unsigned elems = glsl_get_vector_elements(type->type);
887 unsigned type_size = glsl_get_bit_size(type->type) / 8;
888 if (elems == 1 || type->stride == type_size) {
889 /* This is a tightly-packed normal scalar or vector load */
890 vtn_assert(glsl_type_is_vector_or_scalar(type->type));
891 _vtn_load_store_tail(b, op, load, index, offset,
892 access_offset, access_size,
893 inout, type->type,
894 type->access | access);
895 } else {
896 /* This is a strided load. We have to load N things separately.
897 * This is the case of, e.g., a single column of a row-major matrix.
898 */
899 vtn_assert(type->stride > type_size);
900 vtn_assert(type->stride % type_size == 0);
901
902 nir_ssa_def *per_comp[4];
903 for (unsigned i = 0; i < elems; i++) {
904 nir_ssa_def *elem_offset =
905 nir_iadd_imm(&b->nb, offset, i * type->stride);
906 struct vtn_ssa_value *comp, temp_val;
907 if (!load) {
908 temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
909 temp_val.type = glsl_scalar_type(base_type);
910 }
911 comp = &temp_val;
912 _vtn_load_store_tail(b, op, load, index, elem_offset,
913 access_offset, access_size,
914 &comp, glsl_scalar_type(base_type),
915 type->access | access);
916 per_comp[i] = comp->def;
917 }
918
919 if (load) {
920 if (*inout == NULL)
921 *inout = vtn_create_ssa_value(b, type->type);
922 (*inout)->def = nir_vec(&b->nb, per_comp, elems);
923 }
924 }
925 }
926 return;
927
928 case GLSL_TYPE_ARRAY: {
929 unsigned elems = glsl_get_length(type->type);
930 for (unsigned i = 0; i < elems; i++) {
931 nir_ssa_def *elem_off =
932 nir_iadd_imm(&b->nb, offset, i * type->stride);
933 _vtn_block_load_store(b, op, load, index, elem_off,
934 access_offset, access_size,
935 type->array_element,
936 type->array_element->access | access,
937 &(*inout)->elems[i]);
938 }
939 return;
940 }
941
942 case GLSL_TYPE_INTERFACE:
943 case GLSL_TYPE_STRUCT: {
944 unsigned elems = glsl_get_length(type->type);
945 for (unsigned i = 0; i < elems; i++) {
946 nir_ssa_def *elem_off =
947 nir_iadd_imm(&b->nb, offset, type->offsets[i]);
948 _vtn_block_load_store(b, op, load, index, elem_off,
949 access_offset, access_size,
950 type->members[i],
951 type->members[i]->access | access,
952 &(*inout)->elems[i]);
953 }
954 return;
955 }
956
957 default:
958 vtn_fail("Invalid block member type");
959 }
960 }
961
962 static struct vtn_ssa_value *
963 vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
964 {
965 nir_intrinsic_op op;
966 unsigned access_offset = 0, access_size = 0;
967 switch (src->mode) {
968 case vtn_variable_mode_ubo:
969 op = nir_intrinsic_load_ubo;
970 break;
971 case vtn_variable_mode_ssbo:
972 op = nir_intrinsic_load_ssbo;
973 break;
974 case vtn_variable_mode_push_constant:
975 op = nir_intrinsic_load_push_constant;
976 access_size = b->shader->num_uniforms;
977 break;
978 case vtn_variable_mode_workgroup:
979 op = nir_intrinsic_load_shared;
980 break;
981 default:
982 vtn_fail("Invalid block variable mode");
983 }
984
985 nir_ssa_def *offset, *index = NULL;
986 offset = vtn_pointer_to_offset(b, src, &index);
987
988 struct vtn_ssa_value *value = NULL;
989 _vtn_block_load_store(b, op, true, index, offset,
990 access_offset, access_size,
991 src->type, src->access, &value);
992 return value;
993 }
994
995 static void
996 vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
997 struct vtn_pointer *dst)
998 {
999 nir_intrinsic_op op;
1000 switch (dst->mode) {
1001 case vtn_variable_mode_ssbo:
1002 op = nir_intrinsic_store_ssbo;
1003 break;
1004 case vtn_variable_mode_workgroup:
1005 op = nir_intrinsic_store_shared;
1006 break;
1007 default:
1008 vtn_fail("Invalid block variable mode");
1009 }
1010
1011 nir_ssa_def *offset, *index = NULL;
1012 offset = vtn_pointer_to_offset(b, dst, &index);
1013
1014 _vtn_block_load_store(b, op, false, index, offset,
1015 0, 0, dst->type, dst->access, &src);
1016 }
1017
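/* Deref-based counterpart of _vtn_block_load_store: vectors and scalars are
 * loaded/stored directly, while composite types recurse by dereferencing one
 * element at a time with a one-link access chain.  Descriptive comment only.
 */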
1018 static void
1019 _vtn_variable_load_store(struct vtn_builder *b, bool load,
1020 struct vtn_pointer *ptr,
1021 enum gl_access_qualifier access,
1022 struct vtn_ssa_value **inout)
1023 {
1024 enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
1025 switch (base_type) {
1026 case GLSL_TYPE_UINT:
1027 case GLSL_TYPE_INT:
1028 case GLSL_TYPE_UINT16:
1029 case GLSL_TYPE_INT16:
1030 case GLSL_TYPE_UINT8:
1031 case GLSL_TYPE_INT8:
1032 case GLSL_TYPE_UINT64:
1033 case GLSL_TYPE_INT64:
1034 case GLSL_TYPE_FLOAT:
1035 case GLSL_TYPE_FLOAT16:
1036 case GLSL_TYPE_BOOL:
1037 case GLSL_TYPE_DOUBLE:
1038 if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
1039 /* We hit a vector or scalar; go ahead and emit the load[s] */
1040 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
1041 if (vtn_pointer_is_external_block(b, ptr)) {
1042 /* If it's external, we call nir_load/store_deref directly. The
1043 * vtn_local_load/store helpers are too clever and do magic to
1044 * avoid array derefs of vectors. That magic is both less
1045 * efficient than the direct load/store and, in the case of
1046 * stores, is broken because it creates a race condition if two
1047 * threads are writing to different components of the same vector
1048 * due to the load+insert+store it uses to emulate the array
1049 * deref.
1050 */
1051 if (load) {
1052 *inout = vtn_create_ssa_value(b, ptr->type->type);
1053 (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
1054 ptr->type->access | access);
1055 } else {
1056 nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
1057 ptr->type->access | access);
1058 }
1059 } else {
1060 if (load) {
1061 *inout = vtn_local_load(b, deref, ptr->type->access | access);
1062 } else {
1063 vtn_local_store(b, *inout, deref, ptr->type->access | access);
1064 }
1065 }
1066 return;
1067 }
1068 /* Fall through */
1069
1070 case GLSL_TYPE_INTERFACE:
1071 case GLSL_TYPE_ARRAY:
1072 case GLSL_TYPE_STRUCT: {
1073 unsigned elems = glsl_get_length(ptr->type->type);
1074 if (load) {
1075 vtn_assert(*inout == NULL);
1076 *inout = rzalloc(b, struct vtn_ssa_value);
1077 (*inout)->type = ptr->type->type;
1078 (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
1079 }
1080
1081 struct vtn_access_chain chain = {
1082 .length = 1,
1083 .link = {
1084 { .mode = vtn_access_mode_literal, },
1085 }
1086 };
1087 for (unsigned i = 0; i < elems; i++) {
1088 chain.link[0].id = i;
1089 struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
1090 _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
1091 &(*inout)->elems[i]);
1092 }
1093 return;
1094 }
1095
1096 default:
1097 vtn_fail("Invalid access chain type");
1098 }
1099 }
1100
1101 struct vtn_ssa_value *
1102 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
1103 {
1104 if (vtn_pointer_uses_ssa_offset(b, src)) {
1105 return vtn_block_load(b, src);
1106 } else {
1107 struct vtn_ssa_value *val = NULL;
1108 _vtn_variable_load_store(b, true, src, src->access, &val);
1109 return val;
1110 }
1111 }
1112
1113 void
1114 vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
1115 struct vtn_pointer *dest)
1116 {
1117 if (vtn_pointer_uses_ssa_offset(b, dest)) {
1118 vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
1119 dest->mode == vtn_variable_mode_workgroup);
1120 vtn_block_store(b, src, dest);
1121 } else {
1122 _vtn_variable_load_store(b, false, dest, dest->access, &src);
1123 }
1124 }
1125
1126 static void
1127 _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1128 struct vtn_pointer *src)
1129 {
1130 vtn_assert(src->type->type == dest->type->type);
1131 enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
1132 switch (base_type) {
1133 case GLSL_TYPE_UINT:
1134 case GLSL_TYPE_INT:
1135 case GLSL_TYPE_UINT16:
1136 case GLSL_TYPE_INT16:
1137 case GLSL_TYPE_UINT8:
1138 case GLSL_TYPE_INT8:
1139 case GLSL_TYPE_UINT64:
1140 case GLSL_TYPE_INT64:
1141 case GLSL_TYPE_FLOAT:
1142 case GLSL_TYPE_FLOAT16:
1143 case GLSL_TYPE_DOUBLE:
1144 case GLSL_TYPE_BOOL:
1145 /* At this point, we have a scalar, vector, or matrix so we know that
1146 * there cannot be any structure splitting still in the way. By
1147 * stopping at the matrix level rather than the vector level, we
1148 * ensure that matrices get loaded in the optimal way even if they
1149 * are stored row-major in a UBO.
1150 */
1151 vtn_variable_store(b, vtn_variable_load(b, src), dest);
1152 return;
1153
1154 case GLSL_TYPE_INTERFACE:
1155 case GLSL_TYPE_ARRAY:
1156 case GLSL_TYPE_STRUCT: {
1157 struct vtn_access_chain chain = {
1158 .length = 1,
1159 .link = {
1160 { .mode = vtn_access_mode_literal, },
1161 }
1162 };
1163 unsigned elems = glsl_get_length(src->type->type);
1164 for (unsigned i = 0; i < elems; i++) {
1165 chain.link[0].id = i;
1166 struct vtn_pointer *src_elem =
1167 vtn_pointer_dereference(b, src, &chain);
1168 struct vtn_pointer *dest_elem =
1169 vtn_pointer_dereference(b, dest, &chain);
1170
1171 _vtn_variable_copy(b, dest_elem, src_elem);
1172 }
1173 return;
1174 }
1175
1176 default:
1177 vtn_fail("Invalid access chain type");
1178 }
1179 }
1180
1181 static void
1182 vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1183 struct vtn_pointer *src)
1184 {
1185 /* TODO: At some point, we should add a special-case for when we can
1186 * just emit a copy_var intrinsic.
1187 */
1188 _vtn_variable_copy(b, dest, src);
1189 }
1190
1191 static void
1192 set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
1193 {
1194 vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
1195 *mode = nir_var_system_value;
1196 }
1197
1198 static void
1199 vtn_get_builtin_location(struct vtn_builder *b,
1200 SpvBuiltIn builtin, int *location,
1201 nir_variable_mode *mode)
1202 {
1203 switch (builtin) {
1204 case SpvBuiltInPosition:
1205 *location = VARYING_SLOT_POS;
1206 break;
1207 case SpvBuiltInPointSize:
1208 *location = VARYING_SLOT_PSIZ;
1209 break;
1210 case SpvBuiltInClipDistance:
1211 *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
1212 break;
1213 case SpvBuiltInCullDistance:
1214 *location = VARYING_SLOT_CULL_DIST0;
1215 break;
1216 case SpvBuiltInVertexId:
1217 case SpvBuiltInVertexIndex:
1218 /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
1219 * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
1220 * same as gl_VertexID, which is non-zero-based, and removes
1221 * VertexIndex. Since they're both defined to be non-zero-based, we use
1222 * SYSTEM_VALUE_VERTEX_ID for both.
1223 */
1224 *location = SYSTEM_VALUE_VERTEX_ID;
1225 set_mode_system_value(b, mode);
1226 break;
1227 case SpvBuiltInInstanceIndex:
1228 *location = SYSTEM_VALUE_INSTANCE_INDEX;
1229 set_mode_system_value(b, mode);
1230 break;
1231 case SpvBuiltInInstanceId:
1232 *location = SYSTEM_VALUE_INSTANCE_ID;
1233 set_mode_system_value(b, mode);
1234 break;
1235 case SpvBuiltInPrimitiveId:
1236 if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
1237 vtn_assert(*mode == nir_var_shader_in);
1238 *location = VARYING_SLOT_PRIMITIVE_ID;
1239 } else if (*mode == nir_var_shader_out) {
1240 *location = VARYING_SLOT_PRIMITIVE_ID;
1241 } else {
1242 *location = SYSTEM_VALUE_PRIMITIVE_ID;
1243 set_mode_system_value(b, mode);
1244 }
1245 break;
1246 case SpvBuiltInInvocationId:
1247 *location = SYSTEM_VALUE_INVOCATION_ID;
1248 set_mode_system_value(b, mode);
1249 break;
1250 case SpvBuiltInLayer:
1251 *location = VARYING_SLOT_LAYER;
1252 if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1253 *mode = nir_var_shader_in;
1254 else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1255 *mode = nir_var_shader_out;
1256 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1257 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1258 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1259 *mode = nir_var_shader_out;
1260 else
1261 vtn_fail("invalid stage for SpvBuiltInLayer");
1262 break;
1263 case SpvBuiltInViewportIndex:
1264 *location = VARYING_SLOT_VIEWPORT;
1265 if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1266 *mode = nir_var_shader_out;
1267 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1268 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1269 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1270 *mode = nir_var_shader_out;
1271 else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1272 *mode = nir_var_shader_in;
1273 else
1274 vtn_fail("invalid stage for SpvBuiltInViewportIndex");
1275 break;
1276 case SpvBuiltInTessLevelOuter:
1277 *location = VARYING_SLOT_TESS_LEVEL_OUTER;
1278 break;
1279 case SpvBuiltInTessLevelInner:
1280 *location = VARYING_SLOT_TESS_LEVEL_INNER;
1281 break;
1282 case SpvBuiltInTessCoord:
1283 *location = SYSTEM_VALUE_TESS_COORD;
1284 set_mode_system_value(b, mode);
1285 break;
1286 case SpvBuiltInPatchVertices:
1287 *location = SYSTEM_VALUE_VERTICES_IN;
1288 set_mode_system_value(b, mode);
1289 break;
1290 case SpvBuiltInFragCoord:
1291 *location = VARYING_SLOT_POS;
1292 vtn_assert(*mode == nir_var_shader_in);
1293 break;
1294 case SpvBuiltInPointCoord:
1295 *location = VARYING_SLOT_PNTC;
1296 vtn_assert(*mode == nir_var_shader_in);
1297 break;
1298 case SpvBuiltInFrontFacing:
1299 *location = SYSTEM_VALUE_FRONT_FACE;
1300 set_mode_system_value(b, mode);
1301 break;
1302 case SpvBuiltInSampleId:
1303 *location = SYSTEM_VALUE_SAMPLE_ID;
1304 set_mode_system_value(b, mode);
1305 break;
1306 case SpvBuiltInSamplePosition:
1307 *location = SYSTEM_VALUE_SAMPLE_POS;
1308 set_mode_system_value(b, mode);
1309 break;
1310 case SpvBuiltInSampleMask:
1311 if (*mode == nir_var_shader_out) {
1312 *location = FRAG_RESULT_SAMPLE_MASK;
1313 } else {
1314 *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
1315 set_mode_system_value(b, mode);
1316 }
1317 break;
1318 case SpvBuiltInFragDepth:
1319 *location = FRAG_RESULT_DEPTH;
1320 vtn_assert(*mode == nir_var_shader_out);
1321 break;
1322 case SpvBuiltInHelperInvocation:
1323 *location = SYSTEM_VALUE_HELPER_INVOCATION;
1324 set_mode_system_value(b, mode);
1325 break;
1326 case SpvBuiltInNumWorkgroups:
1327 *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
1328 set_mode_system_value(b, mode);
1329 break;
1330 case SpvBuiltInWorkgroupSize:
1331 *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
1332 set_mode_system_value(b, mode);
1333 break;
1334 case SpvBuiltInWorkgroupId:
1335 *location = SYSTEM_VALUE_WORK_GROUP_ID;
1336 set_mode_system_value(b, mode);
1337 break;
1338 case SpvBuiltInLocalInvocationId:
1339 *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
1340 set_mode_system_value(b, mode);
1341 break;
1342 case SpvBuiltInLocalInvocationIndex:
1343 *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
1344 set_mode_system_value(b, mode);
1345 break;
1346 case SpvBuiltInGlobalInvocationId:
1347 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
1348 set_mode_system_value(b, mode);
1349 break;
1350 case SpvBuiltInGlobalLinearId:
1351 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
1352 set_mode_system_value(b, mode);
1353 break;
1354 case SpvBuiltInBaseVertex:
1355 /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
1356 * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
1357 */
1358 *location = SYSTEM_VALUE_FIRST_VERTEX;
1359 set_mode_system_value(b, mode);
1360 break;
1361 case SpvBuiltInBaseInstance:
1362 *location = SYSTEM_VALUE_BASE_INSTANCE;
1363 set_mode_system_value(b, mode);
1364 break;
1365 case SpvBuiltInDrawIndex:
1366 *location = SYSTEM_VALUE_DRAW_ID;
1367 set_mode_system_value(b, mode);
1368 break;
1369 case SpvBuiltInSubgroupSize:
1370 *location = SYSTEM_VALUE_SUBGROUP_SIZE;
1371 set_mode_system_value(b, mode);
1372 break;
1373 case SpvBuiltInSubgroupId:
1374 *location = SYSTEM_VALUE_SUBGROUP_ID;
1375 set_mode_system_value(b, mode);
1376 break;
1377 case SpvBuiltInSubgroupLocalInvocationId:
1378 *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
1379 set_mode_system_value(b, mode);
1380 break;
1381 case SpvBuiltInNumSubgroups:
1382 *location = SYSTEM_VALUE_NUM_SUBGROUPS;
1383 set_mode_system_value(b, mode);
1384 break;
1385 case SpvBuiltInDeviceIndex:
1386 *location = SYSTEM_VALUE_DEVICE_INDEX;
1387 set_mode_system_value(b, mode);
1388 break;
1389 case SpvBuiltInViewIndex:
1390 *location = SYSTEM_VALUE_VIEW_INDEX;
1391 set_mode_system_value(b, mode);
1392 break;
1393 case SpvBuiltInSubgroupEqMask:
1394 *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
1395 set_mode_system_value(b, mode);
1396 break;
1397 case SpvBuiltInSubgroupGeMask:
1398 *location = SYSTEM_VALUE_SUBGROUP_GE_MASK,
1399 set_mode_system_value(b, mode);
1400 break;
1401 case SpvBuiltInSubgroupGtMask:
1402 *location = SYSTEM_VALUE_SUBGROUP_GT_MASK,
1403 set_mode_system_value(b, mode);
1404 break;
1405 case SpvBuiltInSubgroupLeMask:
1406 *location = SYSTEM_VALUE_SUBGROUP_LE_MASK,
1407 set_mode_system_value(b, mode);
1408 break;
1409 case SpvBuiltInSubgroupLtMask:
1410 *location = SYSTEM_VALUE_SUBGROUP_LT_MASK,
1411 set_mode_system_value(b, mode);
1412 break;
1413 case SpvBuiltInFragStencilRefEXT:
1414 *location = FRAG_RESULT_STENCIL;
1415 vtn_assert(*mode == nir_var_shader_out);
1416 break;
1417 case SpvBuiltInWorkDim:
1418 *location = SYSTEM_VALUE_WORK_DIM;
1419 set_mode_system_value(b, mode);
1420 break;
1421 case SpvBuiltInGlobalSize:
1422 *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
1423 set_mode_system_value(b, mode);
1424 break;
1425 default:
1426 vtn_fail("Unsupported builtin: %s (%u)",
1427 spirv_builtin_to_string(builtin), builtin);
1428 }
1429 }
1430
1431 static void
1432 apply_var_decoration(struct vtn_builder *b,
1433 struct nir_variable_data *var_data,
1434 const struct vtn_decoration *dec)
1435 {
1436 switch (dec->decoration) {
1437 case SpvDecorationRelaxedPrecision:
1438 break; /* FIXME: Do nothing with this for now. */
1439 case SpvDecorationNoPerspective:
1440 var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
1441 break;
1442 case SpvDecorationFlat:
1443 var_data->interpolation = INTERP_MODE_FLAT;
1444 break;
1445 case SpvDecorationCentroid:
1446 var_data->centroid = true;
1447 break;
1448 case SpvDecorationSample:
1449 var_data->sample = true;
1450 break;
1451 case SpvDecorationInvariant:
1452 var_data->invariant = true;
1453 break;
1454 case SpvDecorationConstant:
1455 var_data->read_only = true;
1456 break;
1457 case SpvDecorationNonReadable:
1458 var_data->image.access |= ACCESS_NON_READABLE;
1459 break;
1460 case SpvDecorationNonWritable:
1461 var_data->read_only = true;
1462 var_data->image.access |= ACCESS_NON_WRITEABLE;
1463 break;
1464 case SpvDecorationRestrict:
1465 var_data->image.access |= ACCESS_RESTRICT;
1466 break;
1467 case SpvDecorationVolatile:
1468 var_data->image.access |= ACCESS_VOLATILE;
1469 break;
1470 case SpvDecorationCoherent:
1471 var_data->image.access |= ACCESS_COHERENT;
1472 break;
1473 case SpvDecorationComponent:
1474 var_data->location_frac = dec->operands[0];
1475 break;
1476 case SpvDecorationIndex:
1477 var_data->index = dec->operands[0];
1478 break;
1479 case SpvDecorationBuiltIn: {
1480 SpvBuiltIn builtin = dec->operands[0];
1481
1482 nir_variable_mode mode = var_data->mode;
1483 vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
1484 var_data->mode = mode;
1485
1486 switch (builtin) {
1487 case SpvBuiltInTessLevelOuter:
1488 case SpvBuiltInTessLevelInner:
1489 case SpvBuiltInClipDistance:
1490 case SpvBuiltInCullDistance:
1491 var_data->compact = true;
1492 break;
1493 default:
1494 break;
1495 }
1496 }
1497
1498 case SpvDecorationSpecId:
1499 case SpvDecorationRowMajor:
1500 case SpvDecorationColMajor:
1501 case SpvDecorationMatrixStride:
1502 case SpvDecorationAliased:
1503 case SpvDecorationUniform:
1504 case SpvDecorationLinkageAttributes:
1505 break; /* Do nothing with these here */
1506
1507 case SpvDecorationPatch:
1508 var_data->patch = true;
1509 break;
1510
1511 case SpvDecorationLocation:
1512 vtn_fail("Handled above");
1513
1514 case SpvDecorationBlock:
1515 case SpvDecorationBufferBlock:
1516 case SpvDecorationArrayStride:
1517 case SpvDecorationGLSLShared:
1518 case SpvDecorationGLSLPacked:
1519 break; /* These can apply to a type but we don't care about them */
1520
1521 case SpvDecorationBinding:
1522 case SpvDecorationDescriptorSet:
1523 case SpvDecorationNoContraction:
1524 case SpvDecorationInputAttachmentIndex:
1525 vtn_warn("Decoration not allowed for variable or structure member: %s",
1526 spirv_decoration_to_string(dec->decoration));
1527 break;
1528
1529 case SpvDecorationXfbBuffer:
1530 var_data->explicit_xfb_buffer = true;
1531 var_data->xfb_buffer = dec->operands[0];
1532 var_data->always_active_io = true;
1533 break;
1534 case SpvDecorationXfbStride:
1535 var_data->explicit_xfb_stride = true;
1536 var_data->xfb_stride = dec->operands[0];
1537 break;
1538 case SpvDecorationOffset:
1539 var_data->explicit_offset = true;
1540 var_data->offset = dec->operands[0];
1541 break;
1542
1543 case SpvDecorationStream:
1544 var_data->stream = dec->operands[0];
1545 break;
1546
1547 case SpvDecorationCPacked:
1548 case SpvDecorationSaturatedConversion:
1549 case SpvDecorationFuncParamAttr:
1550 case SpvDecorationFPRoundingMode:
1551 case SpvDecorationFPFastMathMode:
1552 case SpvDecorationAlignment:
1553 if (b->shader->info.stage != MESA_SHADER_KERNEL) {
1554 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1555 spirv_decoration_to_string(dec->decoration));
1556 }
1557 break;
1558
1559 case SpvDecorationHlslSemanticGOOGLE:
1560 /* HLSL semantic decorations can safely be ignored by the driver. */
1561 break;
1562
1563 case SpvDecorationRestrictPointerEXT:
1564 case SpvDecorationAliasedPointerEXT:
1565 /* TODO: We should actually plumb alias information through NIR. */
1566 break;
1567
1568 default:
1569 vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
1570 }
1571 }
1572
1573 static void
1574 var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1575 const struct vtn_decoration *dec, void *out_is_patch)
1576 {
1577 if (dec->decoration == SpvDecorationPatch) {
1578 *((bool *) out_is_patch) = true;
1579 }
1580 }
1581
1582 static void
1583 var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1584 const struct vtn_decoration *dec, void *void_var)
1585 {
1586 struct vtn_variable *vtn_var = void_var;
1587
1588 /* Handle decorations that apply to a vtn_variable as a whole */
1589 switch (dec->decoration) {
1590 case SpvDecorationBinding:
1591 vtn_var->binding = dec->operands[0];
1592 vtn_var->explicit_binding = true;
1593 return;
1594 case SpvDecorationDescriptorSet:
1595 vtn_var->descriptor_set = dec->operands[0];
1596 return;
1597 case SpvDecorationInputAttachmentIndex:
1598 vtn_var->input_attachment_index = dec->operands[0];
1599 return;
1600 case SpvDecorationPatch:
1601 vtn_var->patch = true;
1602 break;
1603 case SpvDecorationOffset:
1604 vtn_var->offset = dec->operands[0];
1605 break;
1606 case SpvDecorationNonWritable:
1607 vtn_var->access |= ACCESS_NON_WRITEABLE;
1608 break;
1609 case SpvDecorationNonReadable:
1610 vtn_var->access |= ACCESS_NON_READABLE;
1611 break;
1612 case SpvDecorationVolatile:
1613 vtn_var->access |= ACCESS_VOLATILE;
1614 break;
1615 case SpvDecorationCoherent:
1616 vtn_var->access |= ACCESS_COHERENT;
1617 break;
1618 case SpvDecorationHlslCounterBufferGOOGLE:
1619 /* HLSL semantic decorations can safely be ignored by the driver. */
1620 break;
1621 default:
1622 break;
1623 }
1624
1625 if (val->value_type == vtn_value_type_pointer) {
1626 assert(val->pointer->var == void_var);
1627 assert(member == -1);
1628 } else {
1629 assert(val->value_type == vtn_value_type_type);
1630 }
1631
1632 /* Location is odd. If applied to a split structure, we have to walk the
1633 * whole thing and accumulate the location. It's easier to handle as a
1634 * special case.
1635 */
1636 if (dec->decoration == SpvDecorationLocation) {
1637 unsigned location = dec->operands[0];
1638 if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
1639 vtn_var->mode == vtn_variable_mode_output) {
1640 location += FRAG_RESULT_DATA0;
1641 } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
1642 vtn_var->mode == vtn_variable_mode_input) {
1643 location += VERT_ATTRIB_GENERIC0;
1644 } else if (vtn_var->mode == vtn_variable_mode_input ||
1645 vtn_var->mode == vtn_variable_mode_output) {
1646 location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
1647 } else if (vtn_var->mode != vtn_variable_mode_uniform) {
1648 vtn_warn("Location must be on input, output, uniform, sampler or "
1649 "image variable");
1650 return;
1651 }
1652
1653 if (vtn_var->var->num_members == 0) {
1654 /* This handles the member and lone variable cases */
1655 vtn_var->var->data.location = location;
1656 } else {
1657 /* This handles the structure member case */
1658 assert(vtn_var->var->members);
1659
1660 if (member == -1)
1661 vtn_var->base_location = location;
1662 else
1663 vtn_var->var->members[member].location = location;
1664 }
1665
1666 return;
1667 } else {
1668 if (vtn_var->var) {
1669 if (vtn_var->var->num_members == 0) {
1670 /* We call this function on types as well as variables, and not all
1671 * struct types get split, so we can end up with stray member
1672 * decorations; just ignore them.
1673 */
1674 if (member == -1)
1675 apply_var_decoration(b, &vtn_var->var->data, dec);
1676 } else if (member >= 0) {
1677 /* Member decorations must come from a type */
1678 assert(val->value_type == vtn_value_type_type);
1679 apply_var_decoration(b, &vtn_var->var->members[member], dec);
1680 } else {
1681 unsigned length =
1682 glsl_get_length(glsl_without_array(vtn_var->type->type));
1683 for (unsigned i = 0; i < length; i++)
1684 apply_var_decoration(b, &vtn_var->var->members[i], dec);
1685 }
1686 } else {
1687 /* A few variables, those with external storage, have no actual
1688 * nir_variables associated with them. Fortunately, all decorations
1689 * we care about for those variables are on the type only.
1690 */
1691 vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
1692 vtn_var->mode == vtn_variable_mode_ssbo ||
1693 vtn_var->mode == vtn_variable_mode_push_constant ||
1694 (vtn_var->mode == vtn_variable_mode_workgroup &&
1695 b->options->lower_workgroup_access_to_offsets));
1696 }
1697 }
1698 }
1699
1700 static void
1701 ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1702 const struct vtn_decoration *dec, void *void_ptr)
1703 {
1704 struct vtn_pointer *ptr = void_ptr;
1705
1706 switch (dec->decoration) {
1707 case SpvDecorationNonUniformEXT:
1708 ptr->access |= ACCESS_NON_UNIFORM;
1709 break;
1710
1711 default:
1712 break;
1713 }
1714 }
1715
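/* Translate a SPIR-V storage class into a vtn_variable_mode and, optionally,
 * the matching nir_variable_mode.  For the Uniform class, the interface type
 * disambiguates between UBOs (Block), SSBOs (BufferBlock) and plain
 * default-block uniforms; e.g. StorageBuffer always maps to
 * vtn_variable_mode_ssbo / nir_var_mem_ssbo.
 */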
1716 enum vtn_variable_mode
1717 vtn_storage_class_to_mode(struct vtn_builder *b,
1718 SpvStorageClass class,
1719 struct vtn_type *interface_type,
1720 nir_variable_mode *nir_mode_out)
1721 {
1722 enum vtn_variable_mode mode;
1723 nir_variable_mode nir_mode;
1724 switch (class) {
1725 case SpvStorageClassUniform:
1726       /* Assume it's a UBO if we lack the interface_type. */
1727 if (!interface_type || interface_type->block) {
1728 mode = vtn_variable_mode_ubo;
1729 nir_mode = nir_var_mem_ubo;
1730 } else if (interface_type->buffer_block) {
1731 mode = vtn_variable_mode_ssbo;
1732 nir_mode = nir_var_mem_ssbo;
1733 } else {
1734 /* Default-block uniforms, coming from gl_spirv */
1735 mode = vtn_variable_mode_uniform;
1736 nir_mode = nir_var_uniform;
1737 }
1738 break;
1739 case SpvStorageClassStorageBuffer:
1740 mode = vtn_variable_mode_ssbo;
1741 nir_mode = nir_var_mem_ssbo;
1742 break;
1743 case SpvStorageClassPhysicalStorageBufferEXT:
1744 mode = vtn_variable_mode_phys_ssbo;
1745 nir_mode = nir_var_mem_global;
1746 break;
1747 case SpvStorageClassUniformConstant:
1748 mode = vtn_variable_mode_uniform;
1749 nir_mode = nir_var_uniform;
1750 break;
1751 case SpvStorageClassPushConstant:
1752 mode = vtn_variable_mode_push_constant;
1753 nir_mode = nir_var_uniform;
1754 break;
1755 case SpvStorageClassInput:
1756 mode = vtn_variable_mode_input;
1757 nir_mode = nir_var_shader_in;
1758 break;
1759 case SpvStorageClassOutput:
1760 mode = vtn_variable_mode_output;
1761 nir_mode = nir_var_shader_out;
1762 break;
1763 case SpvStorageClassPrivate:
1764 mode = vtn_variable_mode_private;
1765 nir_mode = nir_var_shader_temp;
1766 break;
1767 case SpvStorageClassFunction:
1768 mode = vtn_variable_mode_function;
1769 nir_mode = nir_var_function_temp;
1770 break;
1771 case SpvStorageClassWorkgroup:
1772 mode = vtn_variable_mode_workgroup;
1773 nir_mode = nir_var_mem_shared;
1774 break;
1775 case SpvStorageClassAtomicCounter:
1776 mode = vtn_variable_mode_uniform;
1777 nir_mode = nir_var_uniform;
1778 break;
1779 case SpvStorageClassCrossWorkgroup:
1780 mode = vtn_variable_mode_cross_workgroup;
1781 nir_mode = nir_var_mem_global;
1782 break;
1783 case SpvStorageClassImage:
1784 mode = vtn_variable_mode_image;
1785 nir_mode = nir_var_mem_ubo;
1786 break;
1787 case SpvStorageClassGeneric:
1788 default:
1789 vtn_fail("Unhandled variable storage class: %s (%u)",
1790 spirv_storageclass_to_string(class), class);
1791 }
1792
1793 if (nir_mode_out)
1794 *nir_mode_out = nir_mode;
1795
1796 return mode;
1797 }
1798
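/* Return the nir_address_format the driver requested (via the spirv_to_nir
 * options) for a given variable mode; drivers typically pick index/offset
 * style formats for UBOs and SSBOs.  Deref-only modes such as private,
 * uniform and shader I/O always use nir_address_format_logical, and Function
 * pointers are only physical when b->physical_ptrs is set.
 */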
1799 nir_address_format
1800 vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
1801 {
1802 switch (mode) {
1803 case vtn_variable_mode_ubo:
1804 return b->options->ubo_addr_format;
1805
1806 case vtn_variable_mode_ssbo:
1807 return b->options->ssbo_addr_format;
1808
1809 case vtn_variable_mode_phys_ssbo:
1810 return b->options->phys_ssbo_addr_format;
1811
1812 case vtn_variable_mode_push_constant:
1813 return b->options->push_const_addr_format;
1814
1815 case vtn_variable_mode_workgroup:
1816 return b->options->shared_addr_format;
1817
1818 case vtn_variable_mode_cross_workgroup:
1819 return b->options->global_addr_format;
1820
1821 case vtn_variable_mode_function:
1822 if (b->physical_ptrs)
1823 return b->options->temp_addr_format;
1824 /* Fall through. */
1825
1826 case vtn_variable_mode_private:
1827 case vtn_variable_mode_uniform:
1828 case vtn_variable_mode_input:
1829 case vtn_variable_mode_output:
1830 case vtn_variable_mode_image:
1831 return nir_address_format_logical;
1832 }
1833
1834 unreachable("Invalid variable mode");
1835 }
1836
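/* Lower a vtn_pointer to a NIR SSA value.  For offset-based modes this is a
 * vec2 of (block_index, offset) for UBOs/SSBOs or just the offset for
 * workgroup memory; for deref-based modes it is either a block index (for
 * external blocks) or the SSA destination of the deref chain.
 */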
1837 nir_ssa_def *
1838 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
1839 {
1840 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1841 /* This pointer needs to have a pointer type with actual storage */
1842 vtn_assert(ptr->ptr_type);
1843 vtn_assert(ptr->ptr_type->type);
1844
1845 if (!ptr->offset) {
1846 /* If we don't have an offset then we must be a pointer to the variable
1847 * itself.
1848 */
1849 vtn_assert(!ptr->offset && !ptr->block_index);
1850
1851 struct vtn_access_chain chain = {
1852 .length = 0,
1853 };
1854 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
1855 }
1856
1857 vtn_assert(ptr->offset);
1858 if (ptr->block_index) {
1859 vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
1860 ptr->mode == vtn_variable_mode_ssbo);
1861 return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
1862 } else {
1863 vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
1864 return ptr->offset;
1865 }
1866 } else {
1867 if (vtn_pointer_is_external_block(b, ptr) &&
1868 vtn_type_contains_block(b, ptr->type) &&
1869 ptr->mode != vtn_variable_mode_phys_ssbo) {
1870 /* In this case, we're looking for a block index and not an actual
1871 * deref.
1872 *
1873 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1874 * at all because we get the pointer directly from the client. This
1875              * assumes that there will never be an SSBO binding variable using
1876              * the PhysicalStorageBufferEXT storage class.  This assumption
1877              * appears to be correct according to the Vulkan spec because, per
1878              * the table "Shader Resource and Storage Class Correspondence,"
1879              * only the Uniform storage class with BufferBlock or the
1880              * StorageBuffer storage class with Block can be used.
1881 */
1882 if (!ptr->block_index) {
1883 /* If we don't have a block_index then we must be a pointer to the
1884 * variable itself.
1885 */
1886 vtn_assert(!ptr->deref);
1887
1888 struct vtn_access_chain chain = {
1889 .length = 0,
1890 };
1891 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
1892 }
1893
1894 return ptr->block_index;
1895 } else {
1896 return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
1897 }
1898 }
1899 }
1900
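/* The inverse of vtn_pointer_to_ssa(): rebuild a vtn_pointer from an SSA
 * value and its SPIR-V pointer type.  This either splits the value back into
 * block_index/offset or wraps it in a deref cast, and also applies the
 * glslang#179 sampler-mode workaround.
 */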
1901 struct vtn_pointer *
1902 vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
1903 struct vtn_type *ptr_type)
1904 {
1905 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1906
1907 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
1908 struct vtn_type *without_array =
1909 vtn_type_without_array(ptr_type->deref);
1910
1911 nir_variable_mode nir_mode;
1912 ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
1913 without_array, &nir_mode);
1914 ptr->type = ptr_type->deref;
1915 ptr->ptr_type = ptr_type;
1916
1917 if (b->wa_glslang_179) {
1918 /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
1919 * need to whack the mode because it creates a function parameter with
1920 * the Function storage class even though it's a pointer to a sampler.
1921 * If we don't do this, then NIR won't get rid of the deref_cast for us.
1922 */
1923 if (ptr->mode == vtn_variable_mode_function &&
1924 (ptr->type->base_type == vtn_base_type_sampler ||
1925 ptr->type->base_type == vtn_base_type_sampled_image)) {
1926 ptr->mode = vtn_variable_mode_uniform;
1927 nir_mode = nir_var_uniform;
1928 }
1929 }
1930
1931 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1932 /* This pointer type needs to have actual storage */
1933 vtn_assert(ptr_type->type);
1934 if (ptr->mode == vtn_variable_mode_ubo ||
1935 ptr->mode == vtn_variable_mode_ssbo) {
1936 vtn_assert(ssa->num_components == 2);
1937 ptr->block_index = nir_channel(&b->nb, ssa, 0);
1938 ptr->offset = nir_channel(&b->nb, ssa, 1);
1939 } else {
1940 vtn_assert(ssa->num_components == 1);
1941 ptr->block_index = NULL;
1942 ptr->offset = ssa;
1943 }
1944 } else {
1945 const struct glsl_type *deref_type = ptr_type->deref->type;
1946 if (!vtn_pointer_is_external_block(b, ptr)) {
1947 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1948 deref_type, 0);
1949 } else if (vtn_type_contains_block(b, ptr->type) &&
1950 ptr->mode != vtn_variable_mode_phys_ssbo) {
1951 /* This is a pointer to somewhere in an array of blocks, not a
1952 * pointer to somewhere inside the block. Set the block index
1953 * instead of making a cast.
1954 */
1955 ptr->block_index = ssa;
1956 } else {
1957 /* This is a pointer to something internal or a pointer inside a
1958 * block. It's just a regular cast.
1959 *
1960 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1961 * at all because we get the pointer directly from the client. This
1962              * assumes that there will never be an SSBO binding variable using
1963              * the PhysicalStorageBufferEXT storage class.  This assumption
1964              * appears to be correct according to the Vulkan spec because, per
1965              * the table "Shader Resource and Storage Class Correspondence,"
1966              * only the Uniform storage class with BufferBlock or the
1967              * StorageBuffer storage class with Block can be used.
1968 */
1969 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1970 ptr_type->deref->type,
1971 ptr_type->stride);
1972 ptr->deref->dest.ssa.num_components =
1973 glsl_get_vector_elements(ptr_type->type);
1974 ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
1975 }
1976 }
1977
1978 return ptr;
1979 }
1980
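/* Returns true if the variable is one of the implicitly arrayed per-vertex
 * inputs/outputs of the tessellation or geometry stages (and not a patch
 * variable), in which case the outer array dimension is the vertex index.
 */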
1981 static bool
1982 is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
1983 {
1984 if (var->patch || !glsl_type_is_array(var->type->type))
1985 return false;
1986
1987 if (var->mode == vtn_variable_mode_input) {
1988 return stage == MESA_SHADER_TESS_CTRL ||
1989 stage == MESA_SHADER_TESS_EVAL ||
1990 stage == MESA_SHADER_GEOMETRY;
1991 }
1992
1993 if (var->mode == vtn_variable_mode_output)
1994 return stage == MESA_SHADER_TESS_CTRL;
1995
1996 return false;
1997 }
1998
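/* Implements the Vulkan rule that a block member without its own Location is
 * assigned the slot immediately after the previous member, starting from the
 * variable's base location and advancing by glsl_count_attribute_slots().
 */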
1999 static void
2000 assign_missing_member_locations(struct vtn_variable *var)
2001 {
2002 unsigned length =
2003 glsl_get_length(glsl_without_array(var->type->type));
2004 int location = var->base_location;
2005
2006 for (unsigned i = 0; i < length; i++) {
2007 /* From the Vulkan spec:
2008 *
2009 * “If the structure type is a Block but without a Location, then each
2010 * of its members must have a Location decoration.”
2011 *
2012 */
2013 if (var->type->block) {
2014 assert(var->base_location != -1 ||
2015 var->var->members[i].location != -1);
2016 }
2017
2018 /* From the Vulkan spec:
2019 *
2020 * “Any member with its own Location decoration is assigned that
2021 * location. Each remaining member is assigned the location after the
2022 * immediately preceding member in declaration order.”
2023 */
2024 if (var->var->members[i].location != -1)
2025 location = var->var->members[i].location;
2026 else
2027 var->var->members[i].location = location;
2028
2029 /* Below we use type instead of interface_type, because interface_type
2030 * is only available when it is a Block. This code also supports
2031        * inputs/outputs that are just structs.
2032 */
2033 const struct glsl_type *member_type =
2034 glsl_get_struct_field(glsl_without_array(var->type->type), i);
2035
2036 location +=
2037 glsl_count_attribute_slots(member_type,
2038 false /* is_gl_vertex_input */);
2039 }
2040 }
2041
2042
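/* Handles OpVariable: maps the storage class to a variable mode, bumps the
 * relevant shader-info counters, creates the nir_variable (splitting
 * per-vertex struct I/O into per-member data), applies decorations and
 * finally registers the variable with the current function or the shader.
 */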
2043 static void
2044 vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
2045 struct vtn_type *ptr_type, SpvStorageClass storage_class,
2046 nir_constant *initializer)
2047 {
2048 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
2049 struct vtn_type *type = ptr_type->deref;
2050
2051 struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);
2052
2053 enum vtn_variable_mode mode;
2054 nir_variable_mode nir_mode;
2055 mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);
2056
2057 switch (mode) {
2058 case vtn_variable_mode_ubo:
2059 /* There's no other way to get vtn_variable_mode_ubo */
2060 vtn_assert(without_array->block);
2061 b->shader->info.num_ubos++;
2062 break;
2063 case vtn_variable_mode_ssbo:
2064 if (storage_class == SpvStorageClassStorageBuffer &&
2065 !without_array->block) {
2066 if (b->variable_pointers) {
2067 vtn_fail("Variables in the StorageBuffer storage class must "
2068 "have a struct type with the Block decoration");
2069 } else {
2070 /* If variable pointers are not present, it's still malformed
2071 * SPIR-V but we can parse it and do the right thing anyway.
2072                * Since some of the 8-bit storage tests have bugs in this area,
2073 * just make it a warning for now.
2074 */
2075 vtn_warn("Variables in the StorageBuffer storage class must "
2076 "have a struct type with the Block decoration");
2077 }
2078 }
2079 b->shader->info.num_ssbos++;
2080 break;
2081 case vtn_variable_mode_uniform:
2082 if (glsl_type_is_image(without_array->type))
2083 b->shader->info.num_images++;
2084 else if (glsl_type_is_sampler(without_array->type))
2085 b->shader->info.num_textures++;
2086 break;
2087 case vtn_variable_mode_push_constant:
2088 b->shader->num_uniforms = vtn_type_block_size(b, type);
2089 break;
2090
2091 case vtn_variable_mode_image:
2092 vtn_fail("Cannot create a variable with the Image storage class");
2093 break;
2094
2095 case vtn_variable_mode_phys_ssbo:
2096 vtn_fail("Cannot create a variable with the "
2097 "PhysicalStorageBufferEXT storage class");
2098 break;
2099
2100 default:
2101 /* No tallying is needed */
2102 break;
2103 }
2104
2105 struct vtn_variable *var = rzalloc(b, struct vtn_variable);
2106 var->type = type;
2107 var->mode = mode;
2108 var->base_location = -1;
2109
2110 vtn_assert(val->value_type == vtn_value_type_pointer);
2111 val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
2112
2113 switch (var->mode) {
2114 case vtn_variable_mode_function:
2115 case vtn_variable_mode_private:
2116 case vtn_variable_mode_uniform:
2117 /* For these, we create the variable normally */
2118 var->var = rzalloc(b->shader, nir_variable);
2119 var->var->name = ralloc_strdup(var->var, val->name);
2120
2121 if (storage_class == SpvStorageClassAtomicCounter) {
2122          /* We need to tweak the NIR type here because at vtn_handle_type()
2123           * we don't have access to the storage class, which is what tells us
2124           * that this is actually an atomic uint.
2125 */
2126 var->var->type = repair_atomic_type(var->type->type);
2127 } else {
2128 /* Private variables don't have any explicit layout but some layouts
2129 * may have leaked through due to type deduplication in the SPIR-V.
2130 */
2131 var->var->type = var->type->type;
2132 }
2133 var->var->data.mode = nir_mode;
2134 var->var->data.location = -1;
2135 var->var->interface_type = NULL;
2136 break;
2137
2138 case vtn_variable_mode_workgroup:
2139 if (b->options->lower_workgroup_access_to_offsets) {
2140 var->shared_location = -1;
2141 } else {
2142 /* Create the variable normally */
2143 var->var = rzalloc(b->shader, nir_variable);
2144 var->var->name = ralloc_strdup(var->var, val->name);
2145 /* Workgroup variables don't have any explicit layout but some
2146 * layouts may have leaked through due to type deduplication in the
2147 * SPIR-V.
2148 */
2149 var->var->type = var->type->type;
2150 var->var->data.mode = nir_var_mem_shared;
2151 }
2152 break;
2153
2154 case vtn_variable_mode_input:
2155 case vtn_variable_mode_output: {
2156 /* In order to know whether or not we're a per-vertex inout, we need
2157 * the patch qualifier. This means walking the variable decorations
2158 * early before we actually create any variables. Not a big deal.
2159 *
2160 * GLSLang really likes to place decorations in the most interior
2161 * thing it possibly can. In particular, if you have a struct, it
2162 * will place the patch decorations on the struct members. This
2163 * should be handled by the variable splitting below just fine.
2164 *
2165 * If you have an array-of-struct, things get even more weird as it
2166 * will place the patch decorations on the struct even though it's
2167 * inside an array and some of the members being patch and others not
2168 * makes no sense whatsoever. Since the only sensible thing is for
2169 * it to be all or nothing, we'll call it patch if any of the members
2170 * are declared patch.
2171 */
2172 var->patch = false;
2173 vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
2174 if (glsl_type_is_array(var->type->type) &&
2175 glsl_type_is_struct_or_ifc(without_array->type)) {
2176 vtn_foreach_decoration(b, vtn_value(b, without_array->id,
2177 vtn_value_type_type),
2178 var_is_patch_cb, &var->patch);
2179 }
2180
2181 /* For inputs and outputs, we immediately split structures. This
2182 * is for a couple of reasons. For one, builtins may all come in
2183 * a struct and we really want those split out into separate
2184 * variables. For another, interpolation qualifiers can be
2185       * applied to members of the top-level struct and we need to be
2186 * able to preserve that information.
2187 */
2188
2189 struct vtn_type *per_vertex_type = var->type;
2190 if (is_per_vertex_inout(var, b->shader->info.stage)) {
2191 /* In Geometry shaders (and some tessellation), inputs come
2192 * in per-vertex arrays. However, some builtins come in
2193 * non-per-vertex, hence the need for the is_array check. In
2194 * any case, there are no non-builtin arrays allowed so this
2195 * check should be sufficient.
2196 */
2197 per_vertex_type = var->type->array_element;
2198 }
2199
2200 var->var = rzalloc(b->shader, nir_variable);
2201 var->var->name = ralloc_strdup(var->var, val->name);
2202 /* In Vulkan, shader I/O variables don't have any explicit layout but
2203 * some layouts may have leaked through due to type deduplication in
2204 * the SPIR-V. We do, however, keep the layouts in the variable's
2205 * interface_type because we need offsets for XFB arrays of blocks.
2206 */
2207 var->var->type = var->type->type;
2208 var->var->data.mode = nir_mode;
2209 var->var->data.patch = var->patch;
2210
2211 /* Figure out the interface block type. */
2212 struct vtn_type *iface_type = per_vertex_type;
2213 if (var->mode == vtn_variable_mode_output &&
2214 (b->shader->info.stage == MESA_SHADER_VERTEX ||
2215 b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
2216 b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
2217 /* For vertex data outputs, we can end up with arrays of blocks for
2218 * transform feedback where each array element corresponds to a
2219 * different XFB output buffer.
2220 */
2221 while (iface_type->base_type == vtn_base_type_array)
2222 iface_type = iface_type->array_element;
2223 }
2224 if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
2225 var->var->interface_type = iface_type->type;
2226
2227 if (per_vertex_type->base_type == vtn_base_type_struct &&
2228 per_vertex_type->block) {
2229 /* It's a struct. Set it up as per-member. */
2230 var->var->num_members = glsl_get_length(per_vertex_type->type);
2231 var->var->members = rzalloc_array(var->var, struct nir_variable_data,
2232 var->var->num_members);
2233
2234 for (unsigned i = 0; i < var->var->num_members; i++) {
2235 var->var->members[i].mode = nir_mode;
2236 var->var->members[i].patch = var->patch;
2237 var->var->members[i].location = -1;
2238 }
2239 }
2240
2241 /* For inputs and outputs, we need to grab locations and builtin
2242 * information from the per-vertex type.
2243 */
2244 vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
2245 vtn_value_type_type),
2246 var_decoration_cb, var);
2247 break;
2248 }
2249
2250 case vtn_variable_mode_ubo:
2251 case vtn_variable_mode_ssbo:
2252 case vtn_variable_mode_push_constant:
2253 case vtn_variable_mode_cross_workgroup:
2254 /* These don't need actual variables. */
2255 break;
2256
2257 case vtn_variable_mode_image:
2258 case vtn_variable_mode_phys_ssbo:
2259 unreachable("Should have been caught before");
2260 }
2261
2262 if (initializer) {
2263 var->var->constant_initializer =
2264 nir_constant_clone(initializer, var->var);
2265 }
2266
2267 vtn_foreach_decoration(b, val, var_decoration_cb, var);
2268 vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
2269
2270 if ((var->mode == vtn_variable_mode_input ||
2271 var->mode == vtn_variable_mode_output) &&
2272 var->var->members) {
2273 assign_missing_member_locations(var);
2274 }
2275
2276 if (var->mode == vtn_variable_mode_uniform) {
2277 /* XXX: We still need the binding information in the nir_variable
2278 * for these. We should fix that.
2279 */
2280 var->var->data.binding = var->binding;
2281 var->var->data.explicit_binding = var->explicit_binding;
2282 var->var->data.descriptor_set = var->descriptor_set;
2283 var->var->data.index = var->input_attachment_index;
2284 var->var->data.offset = var->offset;
2285
2286 if (glsl_type_is_image(without_array->type))
2287 var->var->data.image.format = without_array->image_format;
2288 }
2289
2290 if (var->mode == vtn_variable_mode_function) {
2291 vtn_assert(var->var != NULL && var->var->members == NULL);
2292 nir_function_impl_add_variable(b->nb.impl, var->var);
2293 } else if (var->var) {
2294 nir_shader_add_variable(b->shader, var->var);
2295 } else {
2296 vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
2297 }
2298 }
2299
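/* Checks that the source and destination types of OpLoad/OpStore/OpCopyMemory
 * match.  Compatible types with mismatched IDs only warn, to cope with old
 * glslang output that re-emitted identical types.
 */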
2300 static void
2301 vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
2302 struct vtn_type *dst_type,
2303 struct vtn_type *src_type)
2304 {
2305 if (dst_type->id == src_type->id)
2306 return;
2307
2308 if (vtn_types_compatible(b, dst_type, src_type)) {
2309 /* Early versions of GLSLang would re-emit types unnecessarily and you
2310 * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
2311 * mismatched source and destination types.
2312 *
2313 * https://github.com/KhronosGroup/glslang/issues/304
2314 * https://github.com/KhronosGroup/glslang/issues/307
2315 * https://bugs.freedesktop.org/show_bug.cgi?id=104338
2316 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2317 */
2318 vtn_warn("Source and destination types of %s do not have the same "
2319 "ID (but are compatible): %u vs %u",
2320 spirv_op_to_string(opcode), dst_type->id, src_type->id);
2321 return;
2322 }
2323
2324 vtn_fail("Source and destination types of %s do not match: %s vs. %s",
2325 spirv_op_to_string(opcode),
2326 glsl_get_type_name(dst_type->type),
2327 glsl_get_type_name(src_type->type));
2328 }
2329
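/* Resize a vector to num_components, dropping extra channels or zero-padding
 * missing ones.
 */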
2330 static nir_ssa_def *
2331 nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
2332 unsigned num_components)
2333 {
2334 if (val->num_components == num_components)
2335 return val;
2336
2337 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
2338 for (unsigned i = 0; i < num_components; i++) {
2339 if (i < val->num_components)
2340 comps[i] = nir_channel(b, val, i);
2341 else
2342 comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
2343 }
2344 return nir_vec(b, comps, num_components);
2345 }
2346
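/* Bit-cast a value to a possibly differently sized vector type by
 * zero-padding, re-interpreting with nir_bitcast_vector() and then trimming
 * or padding to the destination component count.  Used by the
 * OpConvertPtrToU / OpConvertUToPtr handling below.
 */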
2347 static nir_ssa_def *
2348 nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
2349 const struct glsl_type *type)
2350 {
2351 const unsigned num_components = glsl_get_vector_elements(type);
2352 const unsigned bit_size = glsl_get_bit_size(type);
2353
2354 /* First, zero-pad to ensure that the value is big enough that when we
2355     * bit-cast it, we don't lose anything.
2356 */
2357 if (val->bit_size < bit_size) {
2358 const unsigned src_num_components_needed =
2359 vtn_align_u32(val->num_components, bit_size / val->bit_size);
2360 val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
2361 }
2362
2363 val = nir_bitcast_vector(b, val, bit_size);
2364
2365 return nir_shrink_zero_pad_vec(b, val, num_components);
2366 }
2367
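/* Top-level handler for the variable-related SPIR-V opcodes: OpUndef,
 * OpVariable, the access-chain opcodes, OpLoad/OpStore, OpCopyMemory,
 * OpArrayLength and the pointer/integer conversions.
 */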
2368 void
2369 vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
2370 const uint32_t *w, unsigned count)
2371 {
2372 switch (opcode) {
2373 case SpvOpUndef: {
2374 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
2375 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
2376 break;
2377 }
2378
2379 case SpvOpVariable: {
2380 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2381
2382 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
2383
2384 SpvStorageClass storage_class = w[3];
2385 nir_constant *initializer = NULL;
2386 if (count > 4)
2387 initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;
2388
2389 vtn_create_variable(b, val, ptr_type, storage_class, initializer);
2390 break;
2391 }
2392
2393 case SpvOpAccessChain:
2394 case SpvOpPtrAccessChain:
2395 case SpvOpInBoundsAccessChain:
2396 case SpvOpInBoundsPtrAccessChain: {
2397 struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
2398 chain->ptr_as_array = (opcode == SpvOpPtrAccessChain || opcode == SpvOpInBoundsPtrAccessChain);
2399
2400 unsigned idx = 0;
2401 for (int i = 4; i < count; i++) {
2402 struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
2403 if (link_val->value_type == vtn_value_type_constant) {
2404 chain->link[idx].mode = vtn_access_mode_literal;
2405 const unsigned bit_size = glsl_get_bit_size(link_val->type->type);
2406 switch (bit_size) {
2407 case 8:
2408 chain->link[idx].id = link_val->constant->values[0][0].i8;
2409 break;
2410 case 16:
2411 chain->link[idx].id = link_val->constant->values[0][0].i16;
2412 break;
2413 case 32:
2414 chain->link[idx].id = link_val->constant->values[0][0].i32;
2415 break;
2416 case 64:
2417 chain->link[idx].id = link_val->constant->values[0][0].i64;
2418 break;
2419 default:
2420 vtn_fail("Invalid bit size: %u", bit_size);
2421 }
2422 } else {
2423 chain->link[idx].mode = vtn_access_mode_id;
2424 chain->link[idx].id = w[i];
2426 }
2427 idx++;
2428 }
2429
2430 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2431 struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
2432 if (base_val->value_type == vtn_value_type_sampled_image) {
2433 /* This is rather insane. SPIR-V allows you to use OpSampledImage
2434 * to combine an array of images with a single sampler to get an
2435 * array of sampled images that all share the same sampler.
2436 * Fortunately, this means that we can more-or-less ignore the
2437 * sampler when crawling the access chain, but it does leave us
2438 * with this rather awkward little special-case.
2439 */
2440 struct vtn_value *val =
2441 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
2442 val->sampled_image = ralloc(b, struct vtn_sampled_image);
2443 val->sampled_image->type = base_val->sampled_image->type;
2444 val->sampled_image->image =
2445 vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
2446 val->sampled_image->sampler = base_val->sampled_image->sampler;
2447 vtn_foreach_decoration(b, val, ptr_decoration_cb,
2448 val->sampled_image->image);
2449 vtn_foreach_decoration(b, val, ptr_decoration_cb,
2450 val->sampled_image->sampler);
2451 } else {
2452 vtn_assert(base_val->value_type == vtn_value_type_pointer);
2453 struct vtn_value *val =
2454 vtn_push_value(b, w[2], vtn_value_type_pointer);
2455 val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
2456 val->pointer->ptr_type = ptr_type;
2457 vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
2458 }
2459 break;
2460 }
2461
2462 case SpvOpCopyMemory: {
2463 struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
2464 struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);
2465
2466 vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);
2467
2468 vtn_variable_copy(b, dest->pointer, src->pointer);
2469 break;
2470 }
2471
2472 case SpvOpLoad: {
2473 struct vtn_type *res_type =
2474 vtn_value(b, w[1], vtn_value_type_type)->type;
2475 struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
2476 struct vtn_pointer *src = src_val->pointer;
2477
2478 vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
2479
2480 if (glsl_type_is_image(res_type->type) ||
2481 glsl_type_is_sampler(res_type->type)) {
2482 vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
2483 return;
2484 }
2485
2486 vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
2487 break;
2488 }
2489
2490 case SpvOpStore: {
2491 struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
2492 struct vtn_pointer *dest = dest_val->pointer;
2493 struct vtn_value *src_val = vtn_untyped_value(b, w[2]);
2494
2495 /* OpStore requires us to actually have a storage type */
2496 vtn_fail_if(dest->type->type == NULL,
2497 "Invalid destination type for OpStore");
2498
2499 if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
2500 glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
2501 /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
2502 * would then store them to a local variable as bool. Work around
2503 * the issue by doing an implicit conversion.
2504 *
2505 * https://github.com/KhronosGroup/glslang/issues/170
2506 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2507 */
2508 vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
2509 "OpTypeBool. Doing an implicit conversion to work around "
2510 "the problem.");
2511 struct vtn_ssa_value *bool_ssa =
2512 vtn_create_ssa_value(b, dest->type->type);
2513 bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
2514 vtn_variable_store(b, bool_ssa, dest);
2515 break;
2516 }
2517
2518 vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
2519
2520 if (glsl_type_is_sampler(dest->type->type)) {
2521 if (b->wa_glslang_179) {
2522 vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
2523 "propagation to workaround the problem.");
2524 vtn_assert(dest->var->copy_prop_sampler == NULL);
2525 dest->var->copy_prop_sampler =
2526 vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
2527 } else {
2528 vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
2529 }
2530 break;
2531 }
2532
2533 struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
2534 vtn_variable_store(b, src, dest);
2535 break;
2536 }
2537
2538 case SpvOpArrayLength: {
2539 struct vtn_pointer *ptr =
2540 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2541 const uint32_t field = w[4];
2542
2543 vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
2544 "OpArrayLength must take a pointer to a structure type");
2545 vtn_fail_if(field != ptr->type->length - 1 ||
2546 ptr->type->members[field]->base_type != vtn_base_type_array,
2547                  "OpArrayLength must reference the last member of the "
2548 "structure and that must be an array");
2549
2550 const uint32_t offset = ptr->type->offsets[field];
2551 const uint32_t stride = ptr->type->members[field]->stride;
2552
2553 if (!ptr->block_index) {
2554 struct vtn_access_chain chain = {
2555 .length = 0,
2556 };
2557 ptr = vtn_pointer_dereference(b, ptr, &chain);
2558 vtn_assert(ptr->block_index);
2559 }
2560
2561 nir_intrinsic_instr *instr =
2562 nir_intrinsic_instr_create(b->nb.shader,
2563 nir_intrinsic_get_buffer_size);
2564 instr->src[0] = nir_src_for_ssa(ptr->block_index);
2565 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
2566 nir_builder_instr_insert(&b->nb, &instr->instr);
2567 nir_ssa_def *buf_size = &instr->dest.ssa;
2568
2569 /* array_length = max(buffer_size - offset, 0) / stride */
2570 nir_ssa_def *array_length =
2571 nir_idiv(&b->nb,
2572 nir_imax(&b->nb,
2573 nir_isub(&b->nb,
2574 buf_size,
2575 nir_imm_int(&b->nb, offset)),
2576 nir_imm_int(&b->nb, 0u)),
2577 nir_imm_int(&b->nb, stride));
2578
2579 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2580 val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
2581 val->ssa->def = array_length;
2582 break;
2583 }
2584
2585 case SpvOpConvertPtrToU: {
2586 struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2587
2588 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2589 u_val->type->base_type != vtn_base_type_scalar,
2590 "OpConvertPtrToU can only be used to cast to a vector or "
2591 "scalar type");
2592
2593 /* The pointer will be converted to an SSA value automatically */
2594 nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;
2595
2596 u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
2597 u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
2598 break;
2599 }
2600
2601 case SpvOpConvertUToPtr: {
2602 struct vtn_value *ptr_val =
2603 vtn_push_value(b, w[2], vtn_value_type_pointer);
2604 struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);
2605
2606 vtn_fail_if(ptr_val->type->type == NULL,
2607 "OpConvertUToPtr can only be used on physical pointers");
2608
2609 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2610 u_val->type->base_type != vtn_base_type_scalar,
2611 "OpConvertUToPtr can only be used to cast from a vector or "
2612 "scalar type");
2613
2614 nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
2615 ptr_val->type->type);
2616 ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
2617 break;
2618 }
2619
2620 case SpvOpCopyMemorySized:
2621 default:
2622 vtn_fail_with_opcode("Unhandled opcode", opcode);
2623 }
2624 }