spirv: Use the generic dereference function for OpArrayLength
[mesa.git] src/compiler/spirv/vtn_variables.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "spirv_info.h"
30 #include "nir_deref.h"
31 #include <vulkan/vulkan_core.h>
32
33 static struct vtn_access_chain *
34 vtn_access_chain_create(struct vtn_builder *b, unsigned length)
35 {
36 struct vtn_access_chain *chain;
37
38 /* Subtract 1 from the length since there's already one built in */
39 size_t size = sizeof(*chain) +
40 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
41 chain = rzalloc_size(b, size);
42 chain->length = length;
43
44 return chain;
45 }
46
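/* Returns true when loads and stores through this pointer are lowered to
 * explicit block-index/byte-offset arithmetic instead of NIR derefs: UBO and
 * SSBO access with lower_ubo_ssbo_access_to_offsets, push constants, and
 * workgroup variables with lower_workgroup_access_to_offsets.
 */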
47 bool
48 vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
49 struct vtn_pointer *ptr)
50 {
51 return ((ptr->mode == vtn_variable_mode_ubo ||
52 ptr->mode == vtn_variable_mode_ssbo) &&
53 b->options->lower_ubo_ssbo_access_to_offsets) ||
54 ptr->mode == vtn_variable_mode_push_constant ||
55 (ptr->mode == vtn_variable_mode_workgroup &&
56 b->options->lower_workgroup_access_to_offsets);
57 }
58
59 static bool
60 vtn_pointer_is_external_block(struct vtn_builder *b,
61 struct vtn_pointer *ptr)
62 {
63 return ptr->mode == vtn_variable_mode_ssbo ||
64 ptr->mode == vtn_variable_mode_ubo ||
65 ptr->mode == vtn_variable_mode_phys_ssbo ||
66 ptr->mode == vtn_variable_mode_push_constant ||
67 (ptr->mode == vtn_variable_mode_workgroup &&
68 b->options->lower_workgroup_access_to_offsets);
69 }
70
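/* Converts a single access-chain link into a NIR SSA value scaled by the
 * given stride.  A literal link becomes an immediate (e.g. id 3 with stride
 * 16 yields an immediate 48); an id link loads the SSA value, converts it to
 * bit_size, and multiplies by the stride when the stride isn't 1.
 */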
71 static nir_ssa_def *
72 vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
73 unsigned stride, unsigned bit_size)
74 {
75 vtn_assert(stride > 0);
76 if (link.mode == vtn_access_mode_literal) {
77 return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
78 } else {
79 nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
80 if (ssa->bit_size != bit_size)
81 ssa = nir_i2i(&b->nb, ssa, bit_size);
82 if (stride != 1)
83 ssa = nir_imul_imm(&b->nb, ssa, stride);
84 return ssa;
85 }
86 }
87
88 static VkDescriptorType
89 vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
90 {
91 switch (mode) {
92 case vtn_variable_mode_ubo:
93 return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
94 case vtn_variable_mode_ssbo:
95 return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
96 default:
97 vtn_fail("Invalid mode for vulkan_resource_index");
98 }
99 }
100
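/* Emits a vulkan_resource_index intrinsic for the variable's descriptor set
 * and binding.  If no descriptor array index is provided, the variable must
 * be a lone (non-arrayed) block and index 0 is used.
 */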
101 static nir_ssa_def *
102 vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
103 nir_ssa_def *desc_array_index)
104 {
105 if (!desc_array_index) {
106 vtn_assert(glsl_type_is_struct(var->type->type));
107 desc_array_index = nir_imm_int(&b->nb, 0);
108 }
109
110 nir_intrinsic_instr *instr =
111 nir_intrinsic_instr_create(b->nb.shader,
112 nir_intrinsic_vulkan_resource_index);
113 instr->src[0] = nir_src_for_ssa(desc_array_index);
114 nir_intrinsic_set_desc_set(instr, var->descriptor_set);
115 nir_intrinsic_set_binding(instr, var->binding);
116 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
117
118 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
119 nir_builder_instr_insert(&b->nb, &instr->instr);
120
121 return &instr->dest.ssa;
122 }
123
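/* Emits a vulkan_resource_reindex intrinsic, stepping an existing resource
 * index by offset_index within an array of descriptors.
 */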
124 static nir_ssa_def *
125 vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
126 nir_ssa_def *base_index, nir_ssa_def *offset_index)
127 {
128 nir_intrinsic_instr *instr =
129 nir_intrinsic_instr_create(b->nb.shader,
130 nir_intrinsic_vulkan_resource_reindex);
131 instr->src[0] = nir_src_for_ssa(base_index);
132 instr->src[1] = nir_src_for_ssa(offset_index);
133 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
134
135 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
136 nir_builder_instr_insert(&b->nb, &instr->instr);
137
138 return &instr->dest.ssa;
139 }
140
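/* Emits a load_vulkan_descriptor intrinsic that turns a resource index into
 * the actual descriptor value used as the base of a UBO/SSBO deref chain.
 */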
141 static nir_ssa_def *
142 vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
143 const struct glsl_type *desc_type, nir_ssa_def *desc_index)
144 {
145 nir_intrinsic_instr *desc_load =
146 nir_intrinsic_instr_create(b->nb.shader,
147 nir_intrinsic_load_vulkan_descriptor);
148 desc_load->src[0] = nir_src_for_ssa(desc_index);
149 desc_load->num_components = glsl_get_vector_elements(desc_type);
150 nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
151 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
152 desc_load->num_components,
153 glsl_get_bit_size(desc_type), NULL);
154 nir_builder_instr_insert(&b->nb, &desc_load->instr);
155
156 return &desc_load->dest.ssa;
157 }
158
159 /* Dereference the given base pointer by the access chain */
160 static struct vtn_pointer *
161 vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
162 struct vtn_pointer *base,
163 struct vtn_access_chain *deref_chain)
164 {
165 struct vtn_type *type = base->type;
166 enum gl_access_qualifier access = base->access;
167 unsigned idx = 0;
168
169 nir_deref_instr *tail;
170 if (base->deref) {
171 tail = base->deref;
172 } else if (vtn_pointer_is_external_block(b, base)) {
173 nir_ssa_def *block_index = base->block_index;
174
175 /* We're dereferencing an external block pointer. Correctness of this
176 * operation relies on one particular line in the SPIR-V spec, section
177 * entitled "Validation Rules for Shader Capabilities":
178 *
179 * "Block and BufferBlock decorations cannot decorate a structure
180 * type that is nested at any level inside another structure type
181 * decorated with Block or BufferBlock."
182 *
183 * This means that we can detect the point where we cross over from
184 * descriptor indexing to buffer indexing by looking for the block
185 * decorated struct type. Anything before the block decorated struct
186 * type is a descriptor indexing operation and anything after the block
187 * decorated struct is a buffer offset operation.
188 */
189
190 /* Figure out the descriptor array index if any
191 *
192 * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
193 * to forget the Block or BufferBlock decoration from time to time.
194 * It's more robust if we check for both !block_index and for the type
195 * to contain a block. This way there's a decent chance that arrays of
196 * UBOs/SSBOs will work correctly even if variable pointers are
197 * completely toast.
198 */
199 nir_ssa_def *desc_arr_idx = NULL;
200 if (!block_index || vtn_type_contains_block(b, type)) {
201 /* If our type contains a block, then we're still outside the block
202 * and we need to process enough levels of dereferences to get inside
203 * of it.
204 */
205 if (deref_chain->ptr_as_array) {
206 unsigned aoa_size = glsl_get_aoa_size(type->type);
207 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
208 MAX2(aoa_size, 1), 32);
209 idx++;
210 }
211
212 for (; idx < deref_chain->length; idx++) {
213 if (type->base_type != vtn_base_type_array) {
214 vtn_assert(type->base_type == vtn_base_type_struct);
215 break;
216 }
217
218 unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
219 nir_ssa_def *arr_offset =
220 vtn_access_link_as_ssa(b, deref_chain->link[idx],
221 MAX2(aoa_size, 1), 32);
222 if (desc_arr_idx)
223 desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
224 else
225 desc_arr_idx = arr_offset;
226
227 type = type->array_element;
228 access |= type->access;
229 }
230 }
231
232 if (!block_index) {
233 vtn_assert(base->var && base->type);
234 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
235 } else if (desc_arr_idx) {
236 block_index = vtn_resource_reindex(b, base->mode,
237 block_index, desc_arr_idx);
238 }
239
240 if (idx == deref_chain->length) {
241 /* The entire deref was consumed in finding the block index. Return
242 * a pointer which just has a block index and a later access chain
243 * will dereference deeper.
244 */
245 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
246 ptr->mode = base->mode;
247 ptr->type = type;
248 ptr->block_index = block_index;
249 ptr->access = access;
250 return ptr;
251 }
252
253 /* If we got here, there's more access chain to handle and we have the
254 * final block index. Insert a descriptor load and cast to a deref to
255 * start the deref chain.
256 */
257 nir_ssa_def *desc =
258 vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);
259
260 assert(base->mode == vtn_variable_mode_ssbo ||
261 base->mode == vtn_variable_mode_ubo);
262 nir_variable_mode nir_mode =
263 base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
264
265 tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
266 base->ptr_type->stride);
267 } else {
268 assert(base->var && base->var->var);
269 tail = nir_build_deref_var(&b->nb, base->var->var);
270 if (base->ptr_type && base->ptr_type->type) {
271 tail->dest.ssa.num_components =
272 glsl_get_vector_elements(base->ptr_type->type);
273 tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
274 }
275 }
276
277 if (idx == 0 && deref_chain->ptr_as_array) {
278 /* We start with a deref cast to get the stride. Hopefully, we'll be
279 * able to delete that cast eventually.
280 */
281 tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
282 tail->type, base->ptr_type->stride);
283
284 nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
285 tail->dest.ssa.bit_size);
286 tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
287 idx++;
288 }
289
290 for (; idx < deref_chain->length; idx++) {
291 if (glsl_type_is_struct(type->type)) {
292 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
293 unsigned field = deref_chain->link[idx].id;
294 tail = nir_build_deref_struct(&b->nb, tail, field);
295 type = type->members[field];
296 } else {
297 nir_ssa_def *arr_index =
298 vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
299 tail->dest.ssa.bit_size);
300 tail = nir_build_deref_array(&b->nb, tail, arr_index);
301 type = type->array_element;
302 }
303
304 access |= type->access;
305 }
306
307 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
308 ptr->mode = base->mode;
309 ptr->type = type;
310 ptr->var = base->var;
311 ptr->deref = tail;
312 ptr->access = access;
313
314 return ptr;
315 }
316
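/* Offset-based counterpart of vtn_nir_deref_pointer_dereference: walks the
 * access chain accumulating a byte offset (and, for UBOs/SSBOs, a block
 * index) using the strides and member offsets stored in the vtn_type.
 */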
317 static struct vtn_pointer *
318 vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
319 struct vtn_pointer *base,
320 struct vtn_access_chain *deref_chain)
321 {
322 nir_ssa_def *block_index = base->block_index;
323 nir_ssa_def *offset = base->offset;
324 struct vtn_type *type = base->type;
325 enum gl_access_qualifier access = base->access;
326
327 unsigned idx = 0;
328 if (base->mode == vtn_variable_mode_ubo ||
329 base->mode == vtn_variable_mode_ssbo) {
330 if (!block_index) {
331 vtn_assert(base->var && base->type);
332 nir_ssa_def *desc_arr_idx;
333 if (glsl_type_is_array(type->type)) {
334 if (deref_chain->length >= 1) {
335 desc_arr_idx =
336 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
337 idx++;
338 /* This consumes a level of type */
339 type = type->array_element;
340 access |= type->access;
341 } else {
342 /* This is annoying. We've been asked for a pointer to the
343 * array of UBOs/SSBOs and not a specific buffer. Return a
344 * pointer with a descriptor index of 0 and we'll have to do
345 * a reindex later to adjust it to the right thing.
346 */
347 desc_arr_idx = nir_imm_int(&b->nb, 0);
348 }
349 } else if (deref_chain->ptr_as_array) {
350 /* You can't have a zero-length OpPtrAccessChain */
351 vtn_assert(deref_chain->length >= 1);
352 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
353 } else {
354 /* We have a regular non-array SSBO. */
355 desc_arr_idx = NULL;
356 }
357 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
358 } else if (deref_chain->ptr_as_array &&
359 type->base_type == vtn_base_type_struct && type->block) {
360 /* We are doing an OpPtrAccessChain on a pointer to a struct that is
361 * decorated block. This is an interesting corner in the SPIR-V
362 * spec. One interpretation would be that the client is clearly
363 * trying to treat that block as if it's an implicit array of blocks
364 * repeated in the buffer. However, the SPIR-V spec for the
365 * OpPtrAccessChain says:
366 *
367 * "Base is treated as the address of the first element of an
368 * array, and the Element element’s address is computed to be the
369 * base for the Indexes, as per OpAccessChain."
370 *
371 * Taken literally, that would mean that your struct type is supposed
372 * to be treated as an array of such a struct and, since it's
373 * decorated block, that means an array of blocks which corresponds
374 * to an array descriptor. Therefore, we need to do a reindex
375 * operation to add the index from the first link in the access chain
376 * to the index we received.
377 *
378 * The downside to this interpretation (there always is one) is that
379 * this might be somewhat surprising behavior to apps if they expect
380 * the implicit array behavior described above.
381 */
382 vtn_assert(deref_chain->length >= 1);
383 nir_ssa_def *offset_index =
384 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
385 idx++;
386
387 block_index = vtn_resource_reindex(b, base->mode,
388 block_index, offset_index);
389 }
390 }
391
392 if (!offset) {
393 if (base->mode == vtn_variable_mode_workgroup) {
394 /* SLM neither needs nor has a block index */
395 vtn_assert(!block_index);
396
397 /* We need the variable for the base offset */
398 vtn_assert(base->var);
399
400 /* We need ptr_type for size and alignment */
401 vtn_assert(base->ptr_type);
402
403 /* Assign location on first use so that we don't end up bloating SLM
404 * address space for variables which are never statically used.
405 */
406 if (base->var->shared_location < 0) {
407 vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
408 b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
409 base->ptr_type->align);
410 base->var->shared_location = b->shader->num_shared;
411 b->shader->num_shared += base->ptr_type->length;
412 }
413
414 offset = nir_imm_int(&b->nb, base->var->shared_location);
415 } else if (base->mode == vtn_variable_mode_push_constant) {
416 /* Push constants neither need nor have a block index */
417 vtn_assert(!block_index);
418
419 /* Start off at the start of the push constant block. */
420 offset = nir_imm_int(&b->nb, 0);
421 } else {
422 /* The code above should have ensured a block_index when needed. */
423 vtn_assert(block_index);
424
425 /* Start off at the start of the buffer. */
426 offset = nir_imm_int(&b->nb, 0);
427 }
428 }
429
430 if (deref_chain->ptr_as_array && idx == 0) {
431 /* We need ptr_type for the stride */
432 vtn_assert(base->ptr_type);
433
434 /* We need at least one element in the chain */
435 vtn_assert(deref_chain->length >= 1);
436
437 nir_ssa_def *elem_offset =
438 vtn_access_link_as_ssa(b, deref_chain->link[idx],
439 base->ptr_type->stride, offset->bit_size);
440 offset = nir_iadd(&b->nb, offset, elem_offset);
441 idx++;
442 }
443
444 for (; idx < deref_chain->length; idx++) {
445 switch (glsl_get_base_type(type->type)) {
446 case GLSL_TYPE_UINT:
447 case GLSL_TYPE_INT:
448 case GLSL_TYPE_UINT16:
449 case GLSL_TYPE_INT16:
450 case GLSL_TYPE_UINT8:
451 case GLSL_TYPE_INT8:
452 case GLSL_TYPE_UINT64:
453 case GLSL_TYPE_INT64:
454 case GLSL_TYPE_FLOAT:
455 case GLSL_TYPE_FLOAT16:
456 case GLSL_TYPE_DOUBLE:
457 case GLSL_TYPE_BOOL:
458 case GLSL_TYPE_ARRAY: {
459 nir_ssa_def *elem_offset =
460 vtn_access_link_as_ssa(b, deref_chain->link[idx],
461 type->stride, offset->bit_size);
462 offset = nir_iadd(&b->nb, offset, elem_offset);
463 type = type->array_element;
464 access |= type->access;
465 break;
466 }
467
468 case GLSL_TYPE_STRUCT: {
469 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
470 unsigned member = deref_chain->link[idx].id;
471 offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
472 type = type->members[member];
473 access |= type->access;
474 break;
475 }
476
477 default:
478 vtn_fail("Invalid type for deref");
479 }
480 }
481
482 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
483 ptr->mode = base->mode;
484 ptr->type = type;
485 ptr->block_index = block_index;
486 ptr->offset = offset;
487 ptr->access = access;
488
489 return ptr;
490 }
491
492 /* Dereference the given base pointer by the access chain */
493 static struct vtn_pointer *
494 vtn_pointer_dereference(struct vtn_builder *b,
495 struct vtn_pointer *base,
496 struct vtn_access_chain *deref_chain)
497 {
498 if (vtn_pointer_uses_ssa_offset(b, base)) {
499 return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
500 } else {
501 return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
502 }
503 }
504
505 struct vtn_pointer *
506 vtn_pointer_for_variable(struct vtn_builder *b,
507 struct vtn_variable *var, struct vtn_type *ptr_type)
508 {
509 struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);
510
511 pointer->mode = var->mode;
512 pointer->type = var->type;
513 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
514 vtn_assert(ptr_type->deref->type == var->type->type);
515 pointer->ptr_type = ptr_type;
516 pointer->var = var;
517 pointer->access = var->access | var->type->access;
518
519 return pointer;
520 }
521
522 /* Returns an atomic_uint type based on the original uint type. The returned
523 * type will be equivalent to the original one but will have an atomic_uint
524 * type as its leaf instead of a uint.
525 *
526 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
527 */
528 static const struct glsl_type *
529 repair_atomic_type(const struct glsl_type *type)
530 {
531 assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
532 assert(glsl_type_is_scalar(glsl_without_array(type)));
533
534 if (glsl_type_is_array(type)) {
535 const struct glsl_type *atomic =
536 repair_atomic_type(glsl_get_array_element(type));
537
538 return glsl_array_type(atomic, glsl_get_length(type),
539 glsl_get_explicit_stride(type));
540 } else {
541 return glsl_atomic_uint_type();
542 }
543 }
544
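/* Returns the NIR deref for a pointer, materializing one with an empty
 * access chain if the pointer doesn't carry a deref yet.
 */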
545 nir_deref_instr *
546 vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
547 {
548 if (b->wa_glslang_179) {
549 /* Do on-the-fly copy propagation for samplers. */
550 if (ptr->var && ptr->var->copy_prop_sampler)
551 return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
552 }
553
554 vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
555 if (!ptr->deref) {
556 struct vtn_access_chain chain = {
557 .length = 0,
558 };
559 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
560 }
561
562 return ptr->deref;
563 }
564
565 static void
566 _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
567 struct vtn_ssa_value *inout)
568 {
569 if (glsl_type_is_vector_or_scalar(deref->type)) {
570 if (load) {
571 inout->def = nir_load_deref(&b->nb, deref);
572 } else {
573 nir_store_deref(&b->nb, deref, inout->def, ~0);
574 }
575 } else if (glsl_type_is_array(deref->type) ||
576 glsl_type_is_matrix(deref->type)) {
577 unsigned elems = glsl_get_length(deref->type);
578 for (unsigned i = 0; i < elems; i++) {
579 nir_deref_instr *child =
580 nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
581 _vtn_local_load_store(b, load, child, inout->elems[i]);
582 }
583 } else {
584 vtn_assert(glsl_type_is_struct(deref->type));
585 unsigned elems = glsl_get_length(deref->type);
586 for (unsigned i = 0; i < elems; i++) {
587 nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
588 _vtn_local_load_store(b, load, child, inout->elems[i]);
589 }
590 }
591 }
592
593 nir_deref_instr *
594 vtn_nir_deref(struct vtn_builder *b, uint32_t id)
595 {
596 struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
597 return vtn_pointer_to_deref(b, ptr);
598 }
599
600 /*
601 * Gets the NIR-level deref tail, which may have as a child an array deref
602 * selecting which component due to OpAccessChain supporting per-component
603 * indexing in SPIR-V.
604 */
605 static nir_deref_instr *
606 get_deref_tail(nir_deref_instr *deref)
607 {
608 if (deref->deref_type != nir_deref_type_array)
609 return deref;
610
611 nir_deref_instr *parent =
612 nir_instr_as_deref(deref->parent.ssa->parent_instr);
613
614 if (glsl_type_is_vector(parent->type))
615 return parent;
616 else
617 return deref;
618 }
619
620 struct vtn_ssa_value *
621 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
622 {
623 nir_deref_instr *src_tail = get_deref_tail(src);
624 struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
625 _vtn_local_load_store(b, true, src_tail, val);
626
627 if (src_tail != src) {
628 val->type = src->type;
629 if (nir_src_is_const(src->arr.index))
630 val->def = vtn_vector_extract(b, val->def,
631 nir_src_as_uint(src->arr.index));
632 else
633 val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
634 }
635
636 return val;
637 }
638
639 void
640 vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
641 nir_deref_instr *dest)
642 {
643 nir_deref_instr *dest_tail = get_deref_tail(dest);
644
645 if (dest_tail != dest) {
646 struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
647 _vtn_local_load_store(b, true, dest_tail, val);
648
649 if (nir_src_is_const(dest->arr.index))
650 val->def = vtn_vector_insert(b, val->def, src->def,
651 nir_src_as_uint(dest->arr.index));
652 else
653 val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
654 dest->arr.index.ssa);
655 _vtn_local_load_store(b, false, dest_tail, val);
656 } else {
657 _vtn_local_load_store(b, false, dest_tail, src);
658 }
659 }
660
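/* Returns the byte offset for an offset-based pointer and stores the block
 * index (if any) in *index_out, materializing both with an empty access
 * chain when needed.
 */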
661 nir_ssa_def *
662 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
663 nir_ssa_def **index_out)
664 {
665 assert(vtn_pointer_uses_ssa_offset(b, ptr));
666 if (!ptr->offset) {
667 struct vtn_access_chain chain = {
668 .length = 0,
669 };
670 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
671 }
672 *index_out = ptr->block_index;
673 return ptr->offset;
674 }
675
676 /* Tries to compute the size of an interface block based on the strides and
677 * offsets that are provided to us in the SPIR-V source.
678 */
679 static unsigned
680 vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
681 {
682 enum glsl_base_type base_type = glsl_get_base_type(type->type);
683 switch (base_type) {
684 case GLSL_TYPE_UINT:
685 case GLSL_TYPE_INT:
686 case GLSL_TYPE_UINT16:
687 case GLSL_TYPE_INT16:
688 case GLSL_TYPE_UINT8:
689 case GLSL_TYPE_INT8:
690 case GLSL_TYPE_UINT64:
691 case GLSL_TYPE_INT64:
692 case GLSL_TYPE_FLOAT:
693 case GLSL_TYPE_FLOAT16:
694 case GLSL_TYPE_BOOL:
695 case GLSL_TYPE_DOUBLE: {
696 unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
697 glsl_get_matrix_columns(type->type);
698 if (cols > 1) {
699 vtn_assert(type->stride > 0);
700 return type->stride * cols;
701 } else {
702 unsigned type_size = glsl_get_bit_size(type->type) / 8;
703 return glsl_get_vector_elements(type->type) * type_size;
704 }
705 }
706
707 case GLSL_TYPE_STRUCT:
708 case GLSL_TYPE_INTERFACE: {
709 unsigned size = 0;
710 unsigned num_fields = glsl_get_length(type->type);
711 for (unsigned f = 0; f < num_fields; f++) {
712 unsigned field_end = type->offsets[f] +
713 vtn_type_block_size(b, type->members[f]);
714 size = MAX2(size, field_end);
715 }
716 return size;
717 }
718
719 case GLSL_TYPE_ARRAY:
720 vtn_assert(type->stride > 0);
721 vtn_assert(glsl_get_length(type->type) > 0);
722 return type->stride * glsl_get_length(type->type);
723
724 default:
725 vtn_fail("Invalid block type");
726 return 0;
727 }
728 }
729
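/* Emits the final load/store intrinsic for a single scalar or vector value:
 * sets the write mask for stores, base/range for push constants, access
 * flags for SSBOs, scalar alignment, and converts loaded booleans from
 * 32-bit integers back to NIR booleans.
 */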
730 static void
731 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
732 nir_ssa_def *index, nir_ssa_def *offset,
733 unsigned access_offset, unsigned access_size,
734 struct vtn_ssa_value **inout, const struct glsl_type *type,
735 enum gl_access_qualifier access)
736 {
737 nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
738 instr->num_components = glsl_get_vector_elements(type);
739
740 /* Booleans usually shouldn't show up in external memory in SPIR-V.
741 * However, they do for certain older GLSLang versions and can for shared
742 * memory when we lower access chains internally.
743 */
744 const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
745 glsl_get_bit_size(type);
746
747 int src = 0;
748 if (!load) {
749 nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
750 instr->src[src++] = nir_src_for_ssa((*inout)->def);
751 }
752
753 if (op == nir_intrinsic_load_push_constant) {
754 nir_intrinsic_set_base(instr, access_offset);
755 nir_intrinsic_set_range(instr, access_size);
756 }
757
758 if (op == nir_intrinsic_load_ssbo ||
759 op == nir_intrinsic_store_ssbo) {
760 nir_intrinsic_set_access(instr, access);
761 }
762
763 /* With extensions like relaxed_block_layout, we really can't guarantee
764 * much more than scalar alignment.
765 */
766 if (op != nir_intrinsic_load_push_constant)
767 nir_intrinsic_set_align(instr, data_bit_size / 8, 0);
768
769 if (index)
770 instr->src[src++] = nir_src_for_ssa(index);
771
772 if (op == nir_intrinsic_load_push_constant) {
773 /* We need to subtract the offset from where the intrinsic will load the
774 * data. */
775 instr->src[src++] =
776 nir_src_for_ssa(nir_isub(&b->nb, offset,
777 nir_imm_int(&b->nb, access_offset)));
778 } else {
779 instr->src[src++] = nir_src_for_ssa(offset);
780 }
781
782 if (load) {
783 nir_ssa_dest_init(&instr->instr, &instr->dest,
784 instr->num_components, data_bit_size, NULL);
785 (*inout)->def = &instr->dest.ssa;
786 }
787
788 nir_builder_instr_insert(&b->nb, &instr->instr);
789
790 if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
791 (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
792 }
793
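/* Recursively walks a block type, splitting matrices, arrays, and structs
 * into per-column/per-element/per-member operations and emitting the actual
 * intrinsics through _vtn_load_store_tail.  Row-major matrices are handled
 * by transposing before stores and after loads.
 */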
794 static void
795 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
796 nir_ssa_def *index, nir_ssa_def *offset,
797 unsigned access_offset, unsigned access_size,
798 struct vtn_type *type, enum gl_access_qualifier access,
799 struct vtn_ssa_value **inout)
800 {
801 if (load && *inout == NULL)
802 *inout = vtn_create_ssa_value(b, type->type);
803
804 enum glsl_base_type base_type = glsl_get_base_type(type->type);
805 switch (base_type) {
806 case GLSL_TYPE_UINT:
807 case GLSL_TYPE_INT:
808 case GLSL_TYPE_UINT16:
809 case GLSL_TYPE_INT16:
810 case GLSL_TYPE_UINT8:
811 case GLSL_TYPE_INT8:
812 case GLSL_TYPE_UINT64:
813 case GLSL_TYPE_INT64:
814 case GLSL_TYPE_FLOAT:
815 case GLSL_TYPE_FLOAT16:
816 case GLSL_TYPE_DOUBLE:
817 case GLSL_TYPE_BOOL:
818 /* This is where things get interesting. At this point, we've hit
819 * a vector, a scalar, or a matrix.
820 */
821 if (glsl_type_is_matrix(type->type)) {
822 /* Loading the whole matrix */
823 struct vtn_ssa_value *transpose;
824 unsigned num_ops, vec_width, col_stride;
825 if (type->row_major) {
826 num_ops = glsl_get_vector_elements(type->type);
827 vec_width = glsl_get_matrix_columns(type->type);
828 col_stride = type->array_element->stride;
829 if (load) {
830 const struct glsl_type *transpose_type =
831 glsl_matrix_type(base_type, vec_width, num_ops);
832 *inout = vtn_create_ssa_value(b, transpose_type);
833 } else {
834 transpose = vtn_ssa_transpose(b, *inout);
835 inout = &transpose;
836 }
837 } else {
838 num_ops = glsl_get_matrix_columns(type->type);
839 vec_width = glsl_get_vector_elements(type->type);
840 col_stride = type->stride;
841 }
842
843 for (unsigned i = 0; i < num_ops; i++) {
844 nir_ssa_def *elem_offset =
845 nir_iadd_imm(&b->nb, offset, i * col_stride);
846 _vtn_load_store_tail(b, op, load, index, elem_offset,
847 access_offset, access_size,
848 &(*inout)->elems[i],
849 glsl_vector_type(base_type, vec_width),
850 type->access | access);
851 }
852
853 if (load && type->row_major)
854 *inout = vtn_ssa_transpose(b, *inout);
855 } else {
856 unsigned elems = glsl_get_vector_elements(type->type);
857 unsigned type_size = glsl_get_bit_size(type->type) / 8;
858 if (elems == 1 || type->stride == type_size) {
859 /* This is a tightly-packed normal scalar or vector load */
860 vtn_assert(glsl_type_is_vector_or_scalar(type->type));
861 _vtn_load_store_tail(b, op, load, index, offset,
862 access_offset, access_size,
863 inout, type->type,
864 type->access | access);
865 } else {
866 /* This is a strided load. We have to load N things separately.
867 * This is the case of a single column of a row-major matrix.
868 */
869 vtn_assert(type->stride > type_size);
870 vtn_assert(type->stride % type_size == 0);
871
872 nir_ssa_def *per_comp[4];
873 for (unsigned i = 0; i < elems; i++) {
874 nir_ssa_def *elem_offset =
875 nir_iadd_imm(&b->nb, offset, i * type->stride);
876 struct vtn_ssa_value *comp, temp_val;
877 if (!load) {
878 temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
879 temp_val.type = glsl_scalar_type(base_type);
880 }
881 comp = &temp_val;
882 _vtn_load_store_tail(b, op, load, index, elem_offset,
883 access_offset, access_size,
884 &comp, glsl_scalar_type(base_type),
885 type->access | access);
886 per_comp[i] = comp->def;
887 }
888
889 if (load) {
890 if (*inout == NULL)
891 *inout = vtn_create_ssa_value(b, type->type);
892 (*inout)->def = nir_vec(&b->nb, per_comp, elems);
893 }
894 }
895 }
896 return;
897
898 case GLSL_TYPE_ARRAY: {
899 unsigned elems = glsl_get_length(type->type);
900 for (unsigned i = 0; i < elems; i++) {
901 nir_ssa_def *elem_off =
902 nir_iadd_imm(&b->nb, offset, i * type->stride);
903 _vtn_block_load_store(b, op, load, index, elem_off,
904 access_offset, access_size,
905 type->array_element,
906 type->array_element->access | access,
907 &(*inout)->elems[i]);
908 }
909 return;
910 }
911
912 case GLSL_TYPE_STRUCT: {
913 unsigned elems = glsl_get_length(type->type);
914 for (unsigned i = 0; i < elems; i++) {
915 nir_ssa_def *elem_off =
916 nir_iadd_imm(&b->nb, offset, type->offsets[i]);
917 _vtn_block_load_store(b, op, load, index, elem_off,
918 access_offset, access_size,
919 type->members[i],
920 type->members[i]->access | access,
921 &(*inout)->elems[i]);
922 }
923 return;
924 }
925
926 default:
927 vtn_fail("Invalid block member type");
928 }
929 }
930
931 static struct vtn_ssa_value *
932 vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
933 {
934 nir_intrinsic_op op;
935 unsigned access_offset = 0, access_size = 0;
936 switch (src->mode) {
937 case vtn_variable_mode_ubo:
938 op = nir_intrinsic_load_ubo;
939 break;
940 case vtn_variable_mode_ssbo:
941 op = nir_intrinsic_load_ssbo;
942 break;
943 case vtn_variable_mode_push_constant:
944 op = nir_intrinsic_load_push_constant;
945 access_size = b->shader->num_uniforms;
946 break;
947 case vtn_variable_mode_workgroup:
948 op = nir_intrinsic_load_shared;
949 break;
950 default:
951 vtn_fail("Invalid block variable mode");
952 }
953
954 nir_ssa_def *offset, *index = NULL;
955 offset = vtn_pointer_to_offset(b, src, &index);
956
957 struct vtn_ssa_value *value = NULL;
958 _vtn_block_load_store(b, op, true, index, offset,
959 access_offset, access_size,
960 src->type, src->access, &value);
961 return value;
962 }
963
964 static void
965 vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
966 struct vtn_pointer *dst)
967 {
968 nir_intrinsic_op op;
969 switch (dst->mode) {
970 case vtn_variable_mode_ssbo:
971 op = nir_intrinsic_store_ssbo;
972 break;
973 case vtn_variable_mode_workgroup:
974 op = nir_intrinsic_store_shared;
975 break;
976 default:
977 vtn_fail("Invalid block variable mode");
978 }
979
980 nir_ssa_def *offset, *index = NULL;
981 offset = vtn_pointer_to_offset(b, dst, &index);
982
983 _vtn_block_load_store(b, op, false, index, offset,
984 0, 0, dst->type, dst->access, &src);
985 }
986
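/* Deref-based load/store path: vectors and scalars are loaded/stored
 * directly (or via vtn_local_load/store for non-external variables), while
 * arrays and structs recurse element by element through per-member pointers.
 */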
987 static void
988 _vtn_variable_load_store(struct vtn_builder *b, bool load,
989 struct vtn_pointer *ptr,
990 struct vtn_ssa_value **inout)
991 {
992 enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
993 switch (base_type) {
994 case GLSL_TYPE_UINT:
995 case GLSL_TYPE_INT:
996 case GLSL_TYPE_UINT16:
997 case GLSL_TYPE_INT16:
998 case GLSL_TYPE_UINT8:
999 case GLSL_TYPE_INT8:
1000 case GLSL_TYPE_UINT64:
1001 case GLSL_TYPE_INT64:
1002 case GLSL_TYPE_FLOAT:
1003 case GLSL_TYPE_FLOAT16:
1004 case GLSL_TYPE_BOOL:
1005 case GLSL_TYPE_DOUBLE:
1006 if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
1007 /* We hit a vector or scalar; go ahead and emit the load[s] */
1008 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
1009 if (vtn_pointer_is_external_block(b, ptr)) {
1010 /* If it's external, we call nir_load/store_deref directly. The
1011 * vtn_local_load/store helpers are too clever and do magic to
1012 * avoid array derefs of vectors. That magic is both less
1013 * efficient than the direct load/store and, in the case of
1014 * stores, is broken because it creates a race condition if two
1015 * threads are writing to different components of the same vector
1016 * due to the load+insert+store it uses to emulate the array
1017 * deref.
1018 */
1019 if (load) {
1020 *inout = vtn_create_ssa_value(b, ptr->type->type);
1021 (*inout)->def = nir_load_deref(&b->nb, deref);
1022 } else {
1023 nir_store_deref(&b->nb, deref, (*inout)->def, ~0);
1024 }
1025 } else {
1026 if (load) {
1027 *inout = vtn_local_load(b, deref);
1028 } else {
1029 vtn_local_store(b, *inout, deref);
1030 }
1031 }
1032 return;
1033 }
1034 /* Fall through */
1035
1036 case GLSL_TYPE_ARRAY:
1037 case GLSL_TYPE_STRUCT: {
1038 unsigned elems = glsl_get_length(ptr->type->type);
1039 if (load) {
1040 vtn_assert(*inout == NULL);
1041 *inout = rzalloc(b, struct vtn_ssa_value);
1042 (*inout)->type = ptr->type->type;
1043 (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
1044 }
1045
1046 struct vtn_access_chain chain = {
1047 .length = 1,
1048 .link = {
1049 { .mode = vtn_access_mode_literal, },
1050 }
1051 };
1052 for (unsigned i = 0; i < elems; i++) {
1053 chain.link[0].id = i;
1054 struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
1055 _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
1056 }
1057 return;
1058 }
1059
1060 default:
1061 vtn_fail("Invalid access chain type");
1062 }
1063 }
1064
1065 struct vtn_ssa_value *
1066 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
1067 {
1068 if (vtn_pointer_uses_ssa_offset(b, src)) {
1069 return vtn_block_load(b, src);
1070 } else {
1071 struct vtn_ssa_value *val = NULL;
1072 _vtn_variable_load_store(b, true, src, &val);
1073 return val;
1074 }
1075 }
1076
1077 void
1078 vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
1079 struct vtn_pointer *dest)
1080 {
1081 if (vtn_pointer_uses_ssa_offset(b, dest)) {
1082 vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
1083 dest->mode == vtn_variable_mode_workgroup);
1084 vtn_block_store(b, src, dest);
1085 } else {
1086 _vtn_variable_load_store(b, false, dest, &src);
1087 }
1088 }
1089
1090 static void
1091 _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1092 struct vtn_pointer *src)
1093 {
1094 vtn_assert(src->type->type == dest->type->type);
1095 enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
1096 switch (base_type) {
1097 case GLSL_TYPE_UINT:
1098 case GLSL_TYPE_INT:
1099 case GLSL_TYPE_UINT16:
1100 case GLSL_TYPE_INT16:
1101 case GLSL_TYPE_UINT8:
1102 case GLSL_TYPE_INT8:
1103 case GLSL_TYPE_UINT64:
1104 case GLSL_TYPE_INT64:
1105 case GLSL_TYPE_FLOAT:
1106 case GLSL_TYPE_FLOAT16:
1107 case GLSL_TYPE_DOUBLE:
1108 case GLSL_TYPE_BOOL:
1109 /* At this point, we have a scalar, vector, or matrix so we know that
1110 * there cannot be any structure splitting still in the way. By
1111 * stopping at the matrix level rather than the vector level, we
1112 * ensure that matrices get loaded in the optimal way even if they
1113 * are stored row-major in a UBO.
1114 */
1115 vtn_variable_store(b, vtn_variable_load(b, src), dest);
1116 return;
1117
1118 case GLSL_TYPE_ARRAY:
1119 case GLSL_TYPE_STRUCT: {
1120 struct vtn_access_chain chain = {
1121 .length = 1,
1122 .link = {
1123 { .mode = vtn_access_mode_literal, },
1124 }
1125 };
1126 unsigned elems = glsl_get_length(src->type->type);
1127 for (unsigned i = 0; i < elems; i++) {
1128 chain.link[0].id = i;
1129 struct vtn_pointer *src_elem =
1130 vtn_pointer_dereference(b, src, &chain);
1131 struct vtn_pointer *dest_elem =
1132 vtn_pointer_dereference(b, dest, &chain);
1133
1134 _vtn_variable_copy(b, dest_elem, src_elem);
1135 }
1136 return;
1137 }
1138
1139 default:
1140 vtn_fail("Invalid access chain type");
1141 }
1142 }
1143
1144 static void
1145 vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1146 struct vtn_pointer *src)
1147 {
1148 /* TODO: At some point, we should add a special-case for when we can
1149 * just emit a copy_var intrinsic.
1150 */
1151 _vtn_variable_copy(b, dest, src);
1152 }
1153
1154 static void
1155 set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
1156 {
1157 vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
1158 *mode = nir_var_system_value;
1159 }
1160
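/* Maps a SPIR-V builtin onto a NIR varying slot, fragment result, or system
 * value, switching the variable mode to nir_var_system_value where needed.
 */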
1161 static void
1162 vtn_get_builtin_location(struct vtn_builder *b,
1163 SpvBuiltIn builtin, int *location,
1164 nir_variable_mode *mode)
1165 {
1166 switch (builtin) {
1167 case SpvBuiltInPosition:
1168 *location = VARYING_SLOT_POS;
1169 break;
1170 case SpvBuiltInPointSize:
1171 *location = VARYING_SLOT_PSIZ;
1172 break;
1173 case SpvBuiltInClipDistance:
1174 *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
1175 break;
1176 case SpvBuiltInCullDistance:
1177 *location = VARYING_SLOT_CULL_DIST0;
1178 break;
1179 case SpvBuiltInVertexId:
1180 case SpvBuiltInVertexIndex:
1181 /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
1182 * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
1183 * same as gl_VertexID, which is non-zero-based, and removes
1184 * VertexIndex. Since they're both defined to be non-zero-based, we use
1185 * SYSTEM_VALUE_VERTEX_ID for both.
1186 */
1187 *location = SYSTEM_VALUE_VERTEX_ID;
1188 set_mode_system_value(b, mode);
1189 break;
1190 case SpvBuiltInInstanceIndex:
1191 *location = SYSTEM_VALUE_INSTANCE_INDEX;
1192 set_mode_system_value(b, mode);
1193 break;
1194 case SpvBuiltInInstanceId:
1195 *location = SYSTEM_VALUE_INSTANCE_ID;
1196 set_mode_system_value(b, mode);
1197 break;
1198 case SpvBuiltInPrimitiveId:
1199 if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
1200 vtn_assert(*mode == nir_var_shader_in);
1201 *location = VARYING_SLOT_PRIMITIVE_ID;
1202 } else if (*mode == nir_var_shader_out) {
1203 *location = VARYING_SLOT_PRIMITIVE_ID;
1204 } else {
1205 *location = SYSTEM_VALUE_PRIMITIVE_ID;
1206 set_mode_system_value(b, mode);
1207 }
1208 break;
1209 case SpvBuiltInInvocationId:
1210 *location = SYSTEM_VALUE_INVOCATION_ID;
1211 set_mode_system_value(b, mode);
1212 break;
1213 case SpvBuiltInLayer:
1214 *location = VARYING_SLOT_LAYER;
1215 if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1216 *mode = nir_var_shader_in;
1217 else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1218 *mode = nir_var_shader_out;
1219 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1220 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1221 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1222 *mode = nir_var_shader_out;
1223 else
1224 vtn_fail("invalid stage for SpvBuiltInLayer");
1225 break;
1226 case SpvBuiltInViewportIndex:
1227 *location = VARYING_SLOT_VIEWPORT;
1228 if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1229 *mode = nir_var_shader_out;
1230 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1231 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1232 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1233 *mode = nir_var_shader_out;
1234 else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1235 *mode = nir_var_shader_in;
1236 else
1237 vtn_fail("invalid stage for SpvBuiltInViewportIndex");
1238 break;
1239 case SpvBuiltInTessLevelOuter:
1240 *location = VARYING_SLOT_TESS_LEVEL_OUTER;
1241 break;
1242 case SpvBuiltInTessLevelInner:
1243 *location = VARYING_SLOT_TESS_LEVEL_INNER;
1244 break;
1245 case SpvBuiltInTessCoord:
1246 *location = SYSTEM_VALUE_TESS_COORD;
1247 set_mode_system_value(b, mode);
1248 break;
1249 case SpvBuiltInPatchVertices:
1250 *location = SYSTEM_VALUE_VERTICES_IN;
1251 set_mode_system_value(b, mode);
1252 break;
1253 case SpvBuiltInFragCoord:
1254 *location = VARYING_SLOT_POS;
1255 vtn_assert(*mode == nir_var_shader_in);
1256 break;
1257 case SpvBuiltInPointCoord:
1258 *location = VARYING_SLOT_PNTC;
1259 vtn_assert(*mode == nir_var_shader_in);
1260 break;
1261 case SpvBuiltInFrontFacing:
1262 *location = SYSTEM_VALUE_FRONT_FACE;
1263 set_mode_system_value(b, mode);
1264 break;
1265 case SpvBuiltInSampleId:
1266 *location = SYSTEM_VALUE_SAMPLE_ID;
1267 set_mode_system_value(b, mode);
1268 break;
1269 case SpvBuiltInSamplePosition:
1270 *location = SYSTEM_VALUE_SAMPLE_POS;
1271 set_mode_system_value(b, mode);
1272 break;
1273 case SpvBuiltInSampleMask:
1274 if (*mode == nir_var_shader_out) {
1275 *location = FRAG_RESULT_SAMPLE_MASK;
1276 } else {
1277 *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
1278 set_mode_system_value(b, mode);
1279 }
1280 break;
1281 case SpvBuiltInFragDepth:
1282 *location = FRAG_RESULT_DEPTH;
1283 vtn_assert(*mode == nir_var_shader_out);
1284 break;
1285 case SpvBuiltInHelperInvocation:
1286 *location = SYSTEM_VALUE_HELPER_INVOCATION;
1287 set_mode_system_value(b, mode);
1288 break;
1289 case SpvBuiltInNumWorkgroups:
1290 *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
1291 set_mode_system_value(b, mode);
1292 break;
1293 case SpvBuiltInWorkgroupSize:
1294 *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
1295 set_mode_system_value(b, mode);
1296 break;
1297 case SpvBuiltInWorkgroupId:
1298 *location = SYSTEM_VALUE_WORK_GROUP_ID;
1299 set_mode_system_value(b, mode);
1300 break;
1301 case SpvBuiltInLocalInvocationId:
1302 *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
1303 set_mode_system_value(b, mode);
1304 break;
1305 case SpvBuiltInLocalInvocationIndex:
1306 *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
1307 set_mode_system_value(b, mode);
1308 break;
1309 case SpvBuiltInGlobalInvocationId:
1310 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
1311 set_mode_system_value(b, mode);
1312 break;
1313 case SpvBuiltInBaseVertex:
1314 /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) does not have the
1315 * same semantics as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
1316 */
1317 *location = SYSTEM_VALUE_FIRST_VERTEX;
1318 set_mode_system_value(b, mode);
1319 break;
1320 case SpvBuiltInBaseInstance:
1321 *location = SYSTEM_VALUE_BASE_INSTANCE;
1322 set_mode_system_value(b, mode);
1323 break;
1324 case SpvBuiltInDrawIndex:
1325 *location = SYSTEM_VALUE_DRAW_ID;
1326 set_mode_system_value(b, mode);
1327 break;
1328 case SpvBuiltInSubgroupSize:
1329 *location = SYSTEM_VALUE_SUBGROUP_SIZE;
1330 set_mode_system_value(b, mode);
1331 break;
1332 case SpvBuiltInSubgroupId:
1333 *location = SYSTEM_VALUE_SUBGROUP_ID;
1334 set_mode_system_value(b, mode);
1335 break;
1336 case SpvBuiltInSubgroupLocalInvocationId:
1337 *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
1338 set_mode_system_value(b, mode);
1339 break;
1340 case SpvBuiltInNumSubgroups:
1341 *location = SYSTEM_VALUE_NUM_SUBGROUPS;
1342 set_mode_system_value(b, mode);
1343 break;
1344 case SpvBuiltInDeviceIndex:
1345 *location = SYSTEM_VALUE_DEVICE_INDEX;
1346 set_mode_system_value(b, mode);
1347 break;
1348 case SpvBuiltInViewIndex:
1349 *location = SYSTEM_VALUE_VIEW_INDEX;
1350 set_mode_system_value(b, mode);
1351 break;
1352 case SpvBuiltInSubgroupEqMask:
1353 *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
1354 set_mode_system_value(b, mode);
1355 break;
1356 case SpvBuiltInSubgroupGeMask:
1357 *location = SYSTEM_VALUE_SUBGROUP_GE_MASK,
1358 set_mode_system_value(b, mode);
1359 break;
1360 case SpvBuiltInSubgroupGtMask:
1361 *location = SYSTEM_VALUE_SUBGROUP_GT_MASK,
1362 set_mode_system_value(b, mode);
1363 break;
1364 case SpvBuiltInSubgroupLeMask:
1365 *location = SYSTEM_VALUE_SUBGROUP_LE_MASK,
1366 set_mode_system_value(b, mode);
1367 break;
1368 case SpvBuiltInSubgroupLtMask:
1369 *location = SYSTEM_VALUE_SUBGROUP_LT_MASK,
1370 set_mode_system_value(b, mode);
1371 break;
1372 case SpvBuiltInFragStencilRefEXT:
1373 *location = FRAG_RESULT_STENCIL;
1374 vtn_assert(*mode == nir_var_shader_out);
1375 break;
1376 case SpvBuiltInWorkDim:
1377 *location = SYSTEM_VALUE_WORK_DIM;
1378 set_mode_system_value(b, mode);
1379 break;
1380 case SpvBuiltInGlobalSize:
1381 *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
1382 set_mode_system_value(b, mode);
1383 break;
1384 default:
1385 vtn_fail("unsupported builtin: %u", builtin);
1386 }
1387 }
1388
1389 static void
1390 apply_var_decoration(struct vtn_builder *b,
1391 struct nir_variable_data *var_data,
1392 const struct vtn_decoration *dec)
1393 {
1394 switch (dec->decoration) {
1395 case SpvDecorationRelaxedPrecision:
1396 break; /* FIXME: Do nothing with this for now. */
1397 case SpvDecorationNoPerspective:
1398 var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
1399 break;
1400 case SpvDecorationFlat:
1401 var_data->interpolation = INTERP_MODE_FLAT;
1402 break;
1403 case SpvDecorationCentroid:
1404 var_data->centroid = true;
1405 break;
1406 case SpvDecorationSample:
1407 var_data->sample = true;
1408 break;
1409 case SpvDecorationInvariant:
1410 var_data->invariant = true;
1411 break;
1412 case SpvDecorationConstant:
1413 var_data->read_only = true;
1414 break;
1415 case SpvDecorationNonReadable:
1416 var_data->image.access |= ACCESS_NON_READABLE;
1417 break;
1418 case SpvDecorationNonWritable:
1419 var_data->read_only = true;
1420 var_data->image.access |= ACCESS_NON_WRITEABLE;
1421 break;
1422 case SpvDecorationRestrict:
1423 var_data->image.access |= ACCESS_RESTRICT;
1424 break;
1425 case SpvDecorationVolatile:
1426 var_data->image.access |= ACCESS_VOLATILE;
1427 break;
1428 case SpvDecorationCoherent:
1429 var_data->image.access |= ACCESS_COHERENT;
1430 break;
1431 case SpvDecorationComponent:
1432 var_data->location_frac = dec->literals[0];
1433 break;
1434 case SpvDecorationIndex:
1435 var_data->index = dec->literals[0];
1436 break;
1437 case SpvDecorationBuiltIn: {
1438 SpvBuiltIn builtin = dec->literals[0];
1439
1440 nir_variable_mode mode = var_data->mode;
1441 vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
1442 var_data->mode = mode;
1443
1444 switch (builtin) {
1445 case SpvBuiltInTessLevelOuter:
1446 case SpvBuiltInTessLevelInner:
1447 case SpvBuiltInClipDistance:
1448 case SpvBuiltInCullDistance:
1449 var_data->compact = true;
1450 break;
1451 default:
1452 break;
1453 }
1454 }
1455
1456 case SpvDecorationSpecId:
1457 case SpvDecorationRowMajor:
1458 case SpvDecorationColMajor:
1459 case SpvDecorationMatrixStride:
1460 case SpvDecorationAliased:
1461 case SpvDecorationUniform:
1462 case SpvDecorationLinkageAttributes:
1463 break; /* Do nothing with these here */
1464
1465 case SpvDecorationPatch:
1466 var_data->patch = true;
1467 break;
1468
1469 case SpvDecorationLocation:
1470 vtn_fail("Handled above");
1471
1472 case SpvDecorationBlock:
1473 case SpvDecorationBufferBlock:
1474 case SpvDecorationArrayStride:
1475 case SpvDecorationGLSLShared:
1476 case SpvDecorationGLSLPacked:
1477 break; /* These can apply to a type but we don't care about them */
1478
1479 case SpvDecorationBinding:
1480 case SpvDecorationDescriptorSet:
1481 case SpvDecorationNoContraction:
1482 case SpvDecorationInputAttachmentIndex:
1483 vtn_warn("Decoration not allowed for variable or structure member: %s",
1484 spirv_decoration_to_string(dec->decoration));
1485 break;
1486
1487 case SpvDecorationXfbBuffer:
1488 var_data->explicit_xfb_buffer = true;
1489 var_data->xfb_buffer = dec->literals[0];
1490 var_data->always_active_io = true;
1491 break;
1492 case SpvDecorationXfbStride:
1493 var_data->explicit_xfb_stride = true;
1494 var_data->xfb_stride = dec->literals[0];
1495 break;
1496 case SpvDecorationOffset:
1497 var_data->explicit_offset = true;
1498 var_data->offset = dec->literals[0];
1499 break;
1500
1501 case SpvDecorationStream:
1502 var_data->stream = dec->literals[0];
1503 break;
1504
1505 case SpvDecorationCPacked:
1506 case SpvDecorationSaturatedConversion:
1507 case SpvDecorationFuncParamAttr:
1508 case SpvDecorationFPRoundingMode:
1509 case SpvDecorationFPFastMathMode:
1510 case SpvDecorationAlignment:
1511 if (b->shader->info.stage != MESA_SHADER_KERNEL) {
1512 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1513 spirv_decoration_to_string(dec->decoration));
1514 }
1515 break;
1516
1517 case SpvDecorationHlslSemanticGOOGLE:
1518 /* HLSL semantic decorations can safely be ignored by the driver. */
1519 break;
1520
1521 case SpvDecorationRestrictPointerEXT:
1522 case SpvDecorationAliasedPointerEXT:
1523 /* TODO: We should actually plumb alias information through NIR. */
1524 break;
1525
1526 default:
1527 vtn_fail("Unhandled decoration");
1528 }
1529 }
1530
1531 static void
1532 var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1533 const struct vtn_decoration *dec, void *out_is_patch)
1534 {
1535 if (dec->decoration == SpvDecorationPatch) {
1536 *((bool *) out_is_patch) = true;
1537 }
1538 }
1539
1540 static void
1541 var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1542 const struct vtn_decoration *dec, void *void_var)
1543 {
1544 struct vtn_variable *vtn_var = void_var;
1545
1546 /* Handle decorations that apply to a vtn_variable as a whole */
1547 switch (dec->decoration) {
1548 case SpvDecorationBinding:
1549 vtn_var->binding = dec->literals[0];
1550 vtn_var->explicit_binding = true;
1551 return;
1552 case SpvDecorationDescriptorSet:
1553 vtn_var->descriptor_set = dec->literals[0];
1554 return;
1555 case SpvDecorationInputAttachmentIndex:
1556 vtn_var->input_attachment_index = dec->literals[0];
1557 return;
1558 case SpvDecorationPatch:
1559 vtn_var->patch = true;
1560 break;
1561 case SpvDecorationOffset:
1562 vtn_var->offset = dec->literals[0];
1563 break;
1564 case SpvDecorationNonWritable:
1565 vtn_var->access |= ACCESS_NON_WRITEABLE;
1566 break;
1567 case SpvDecorationNonReadable:
1568 vtn_var->access |= ACCESS_NON_READABLE;
1569 break;
1570 case SpvDecorationVolatile:
1571 vtn_var->access |= ACCESS_VOLATILE;
1572 break;
1573 case SpvDecorationCoherent:
1574 vtn_var->access |= ACCESS_COHERENT;
1575 break;
1576 case SpvDecorationHlslCounterBufferGOOGLE:
1577 /* HLSL semantic decorations can safely be ignored by the driver. */
1578 break;
1579 default:
1580 break;
1581 }
1582
1583 if (val->value_type == vtn_value_type_pointer) {
1584 assert(val->pointer->var == void_var);
1585 assert(member == -1);
1586 } else {
1587 assert(val->value_type == vtn_value_type_type);
1588 }
1589
1590 /* Location is odd. If applied to a split structure, we have to walk the
1591 * whole thing and accumulate the location. It's easier to handle as a
1592 * special case.
1593 */
1594 if (dec->decoration == SpvDecorationLocation) {
1595 unsigned location = dec->literals[0];
1596 if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
1597 vtn_var->mode == vtn_variable_mode_output) {
1598 location += FRAG_RESULT_DATA0;
1599 } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
1600 vtn_var->mode == vtn_variable_mode_input) {
1601 location += VERT_ATTRIB_GENERIC0;
1602 } else if (vtn_var->mode == vtn_variable_mode_input ||
1603 vtn_var->mode == vtn_variable_mode_output) {
1604 location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
1605 } else if (vtn_var->mode != vtn_variable_mode_uniform) {
1606 vtn_warn("Location must be on input, output, uniform, sampler or "
1607 "image variable");
1608 return;
1609 }
1610
1611 if (vtn_var->var->num_members == 0) {
1612 /* This handles the member and lone variable cases */
1613 vtn_var->var->data.location = location;
1614 } else {
1615 /* This handles the structure member case */
1616 assert(vtn_var->var->members);
1617
1618 if (member == -1)
1619 vtn_var->base_location = location;
1620 else
1621 vtn_var->var->members[member].location = location;
1622 }
1623
1624 return;
1625 } else {
1626 if (vtn_var->var) {
1627 if (vtn_var->var->num_members == 0) {
1628 /* We call this function on types as well as variables, and not all
1629 * struct types get split, so we can end up having stray member
1630 * decorations; just ignore them.
1631 */
1632 if (member == -1)
1633 apply_var_decoration(b, &vtn_var->var->data, dec);
1634 } else if (member >= 0) {
1635 /* Member decorations must come from a type */
1636 assert(val->value_type == vtn_value_type_type);
1637 apply_var_decoration(b, &vtn_var->var->members[member], dec);
1638 } else {
1639 unsigned length =
1640 glsl_get_length(glsl_without_array(vtn_var->type->type));
1641 for (unsigned i = 0; i < length; i++)
1642 apply_var_decoration(b, &vtn_var->var->members[i], dec);
1643 }
1644 } else {
1645 /* A few variables, those with external storage, have no actual
1646 * nir_variables associated with them. Fortunately, all decorations
1647 * we care about for those variables are on the type only.
1648 */
1649 vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
1650 vtn_var->mode == vtn_variable_mode_ssbo ||
1651 vtn_var->mode == vtn_variable_mode_push_constant ||
1652 (vtn_var->mode == vtn_variable_mode_workgroup &&
1653 b->options->lower_workgroup_access_to_offsets));
1654 }
1655 }
1656 }
1657
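/* Maps a SPIR-V storage class (together with the Block/BufferBlock
 * information on the interface type for Uniform) onto a vtn_variable_mode
 * and the corresponding nir_variable_mode.
 */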
1658 static enum vtn_variable_mode
1659 vtn_storage_class_to_mode(struct vtn_builder *b,
1660 SpvStorageClass class,
1661 struct vtn_type *interface_type,
1662 nir_variable_mode *nir_mode_out)
1663 {
1664 enum vtn_variable_mode mode;
1665 nir_variable_mode nir_mode;
1666 switch (class) {
1667 case SpvStorageClassUniform:
1668 if (interface_type->block) {
1669 mode = vtn_variable_mode_ubo;
1670 nir_mode = nir_var_mem_ubo;
1671 } else if (interface_type->buffer_block) {
1672 mode = vtn_variable_mode_ssbo;
1673 nir_mode = nir_var_mem_ssbo;
1674 } else {
1675 /* Default-block uniforms, coming from gl_spirv */
1676 mode = vtn_variable_mode_uniform;
1677 nir_mode = nir_var_uniform;
1678 }
1679 break;
1680 case SpvStorageClassStorageBuffer:
1681 mode = vtn_variable_mode_ssbo;
1682 nir_mode = nir_var_mem_ssbo;
1683 break;
1684 case SpvStorageClassPhysicalStorageBufferEXT:
1685 mode = vtn_variable_mode_phys_ssbo;
1686 nir_mode = nir_var_mem_global;
1687 break;
1688 case SpvStorageClassUniformConstant:
1689 mode = vtn_variable_mode_uniform;
1690 nir_mode = nir_var_uniform;
1691 break;
1692 case SpvStorageClassPushConstant:
1693 mode = vtn_variable_mode_push_constant;
1694 nir_mode = nir_var_uniform;
1695 break;
1696 case SpvStorageClassInput:
1697 mode = vtn_variable_mode_input;
1698 nir_mode = nir_var_shader_in;
1699 break;
1700 case SpvStorageClassOutput:
1701 mode = vtn_variable_mode_output;
1702 nir_mode = nir_var_shader_out;
1703 break;
1704 case SpvStorageClassPrivate:
1705 mode = vtn_variable_mode_private;
1706 nir_mode = nir_var_shader_temp;
1707 break;
1708 case SpvStorageClassFunction:
1709 mode = vtn_variable_mode_function;
1710 nir_mode = nir_var_function_temp;
1711 break;
1712 case SpvStorageClassWorkgroup:
1713 mode = vtn_variable_mode_workgroup;
1714 nir_mode = nir_var_mem_shared;
1715 break;
1716 case SpvStorageClassAtomicCounter:
1717 mode = vtn_variable_mode_uniform;
1718 nir_mode = nir_var_uniform;
1719 break;
1720 case SpvStorageClassCrossWorkgroup:
1721 mode = vtn_variable_mode_cross_workgroup;
1722 nir_mode = nir_var_mem_global;
1723 break;
1724 case SpvStorageClassGeneric:
1725 default:
1726 vtn_fail("Unhandled variable storage class");
1727 }
1728
1729 if (nir_mode_out)
1730 *nir_mode_out = nir_mode;
1731
1732 return mode;
1733 }
1734
1735 nir_ssa_def *
1736 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
1737 {
1738 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1739 /* This pointer needs to have a pointer type with actual storage */
1740 vtn_assert(ptr->ptr_type);
1741 vtn_assert(ptr->ptr_type->type);
1742
1743 if (!ptr->offset) {
1744 /* If we don't have an offset then we must be a pointer to the variable
1745 * itself.
1746 */
1747 vtn_assert(!ptr->offset && !ptr->block_index);
1748
1749 struct vtn_access_chain chain = {
1750 .length = 0,
1751 };
1752 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
1753 }
1754
1755 vtn_assert(ptr->offset);
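 /* When lowered to offsets, UBO/SSBO pointers are packed as a
 * (block_index, byte_offset) uvec2, while workgroup pointers are just
 * the byte offset; vtn_pointer_from_ssa unpacks the same layout.
 */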
1756 if (ptr->block_index) {
1757 vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
1758 ptr->mode == vtn_variable_mode_ssbo);
1759 return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
1760 } else {
1761 vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
1762 return ptr->offset;
1763 }
1764 } else {
1765 if (vtn_pointer_is_external_block(b, ptr) &&
1766 vtn_type_contains_block(b, ptr->type) &&
1767 ptr->mode != vtn_variable_mode_phys_ssbo) {
1768 const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
1769 const unsigned num_components =
1770 glsl_get_vector_elements(ptr->ptr_type->type);
1771
1772 /* In this case, we're looking for a block index and not an actual
1773 * deref.
1774 *
1775 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1776 * at all because we get the pointer directly from the client. This
1777 * assumes that there will never be an SSBO binding variable using the
1778 * PhysicalStorageBufferEXT storage class. This assumption appears to
1779 * be correct according to the Vulkan spec: in the table "Shader
1780 * Resource and Storage Class Correspondence," only the Uniform
1781 * storage class with BufferBlock or the StorageBuffer storage class
1782 * with Block can be used.
1783 */
1784 if (!ptr->block_index) {
1785 /* If we don't have a block_index then we must be a pointer to the
1786 * variable itself.
1787 */
1788 vtn_assert(!ptr->deref);
1789
1790 struct vtn_access_chain chain = {
1791 .length = 0,
1792 };
1793 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
1794 }
1795
1796 /* A block index is just a 32-bit value but the pointer has some
1797 * other dimensionality. Cram it in there and we'll unpack it later
1798 * in vtn_pointer_from_ssa.
1799 */
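 /* The all-zero swizzle below replicates the 32-bit block index into
 * every channel of the pointer-sized vector; vtn_pointer_from_ssa only
 * ever reads channel 0 back out.
 */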
1800 const unsigned swiz[4] = { 0, };
1801 return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
1802 swiz, num_components, false);
1803 } else {
1804 return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
1805 }
1806 }
1807 }
1808
1809 struct vtn_pointer *
1810 vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
1811 struct vtn_type *ptr_type)
1812 {
1813 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1814
1815 struct vtn_type *interface_type = ptr_type->deref;
1816 while (interface_type->base_type == vtn_base_type_array)
1817 interface_type = interface_type->array_element;
1818
1819 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
1820 nir_variable_mode nir_mode;
1821 ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
1822 interface_type, &nir_mode);
1823 ptr->type = ptr_type->deref;
1824 ptr->ptr_type = ptr_type;
1825
1826 if (b->wa_glslang_179) {
1827 /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
1828 * need to whack the mode because it creates a function parameter with
1829 * the Function storage class even though it's a pointer to a sampler.
1830 * If we don't do this, then NIR won't get rid of the deref_cast for us.
1831 */
1832 if (ptr->mode == vtn_variable_mode_function &&
1833 (ptr->type->base_type == vtn_base_type_sampler ||
1834 ptr->type->base_type == vtn_base_type_sampled_image)) {
1835 ptr->mode = vtn_variable_mode_uniform;
1836 nir_mode = nir_var_uniform;
1837 }
1838 }
1839
1840 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1841 /* This pointer type needs to have actual storage */
1842 vtn_assert(ptr_type->type);
1843 if (ptr->mode == vtn_variable_mode_ubo ||
1844 ptr->mode == vtn_variable_mode_ssbo) {
1845 vtn_assert(ssa->num_components == 2);
1846 ptr->block_index = nir_channel(&b->nb, ssa, 0);
1847 ptr->offset = nir_channel(&b->nb, ssa, 1);
1848 } else {
1849 vtn_assert(ssa->num_components == 1);
1850 ptr->block_index = NULL;
1851 ptr->offset = ssa;
1852 }
1853 } else {
1854 const struct glsl_type *deref_type = ptr_type->deref->type;
1855 if (!vtn_pointer_is_external_block(b, ptr)) {
1856 assert(ssa->bit_size == 32 && ssa->num_components == 1);
1857 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1858 glsl_get_bare_type(deref_type), 0);
1859 } else if (vtn_type_contains_block(b, ptr->type) &&
1860 ptr->mode != vtn_variable_mode_phys_ssbo) {
1861 /* This is a pointer to somewhere in an array of blocks, not a
1862 * pointer to somewhere inside the block. We squashed it into a
1863 * random vector type before so just pick off the first channel and
1864 * cast it back to 32 bits.
1865 */
1866 ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
1867 } else {
1868 /* This is a pointer to something internal or a pointer inside a
1869 * block. It's just a regular cast.
1870 *
1871 * For PhysicalStorageBufferEXT pointers, we don't have a block index
1872 * at all because we get the pointer directly from the client. This
1873 * assumes that there will never be an SSBO binding variable using the
1874 * PhysicalStorageBufferEXT storage class. This assumption appears to
1875 * be correct according to the Vulkan spec: in the table "Shader
1876 * Resource and Storage Class Correspondence," only the Uniform
1877 * storage class with BufferBlock or the StorageBuffer storage class
1878 * with Block can be used.
1879 */
1880 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1881 ptr_type->deref->type,
1882 ptr_type->stride);
1883 ptr->deref->dest.ssa.num_components =
1884 glsl_get_vector_elements(ptr_type->type);
1885 ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
1886 }
1887 }
1888
1889 return ptr;
1890 }
1891
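/* Returns true when the variable's outermost array dimension indexes
 * vertices rather than being part of the declared type, e.g.
 * (illustrative) a geometry-shader input such as gl_in[] or a
 * tessellation-control per-vertex output.
 */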
1892 static bool
1893 is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
1894 {
1895 if (var->patch || !glsl_type_is_array(var->type->type))
1896 return false;
1897
1898 if (var->mode == vtn_variable_mode_input) {
1899 return stage == MESA_SHADER_TESS_CTRL ||
1900 stage == MESA_SHADER_TESS_EVAL ||
1901 stage == MESA_SHADER_GEOMETRY;
1902 }
1903
1904 if (var->mode == vtn_variable_mode_output)
1905 return stage == MESA_SHADER_TESS_CTRL;
1906
1907 return false;
1908 }
1909
1910 static void
1911 assign_missing_member_locations(struct vtn_variable *var)
1912 {
1913 unsigned length =
1914 glsl_get_length(glsl_without_array(var->type->type));
1915 int location = var->base_location;
1916
1917 for (unsigned i = 0; i < length; i++) {
1918 /* From the Vulkan spec:
1919 *
1920 * “If the structure type is a Block but without a Location, then each
1921 * of its members must have a Location decoration.”
1922 *
1923 */
1924 if (var->type->block) {
1925 assert(var->base_location != -1 ||
1926 var->var->members[i].location != -1);
1927 }
1928
1929 /* From the Vulkan spec:
1930 *
1931 * “Any member with its own Location decoration is assigned that
1932 * location. Each remaining member is assigned the location after the
1933 * immediately preceding member in declaration order.”
1934 */
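 /* Worked example (illustrative, not from the spec): if member 0 is a
 * vec4 with Location 3 and member 1 has no Location, member 1 is
 * assigned location 4, since a vec4 occupies a single attribute slot.
 */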
1935 if (var->var->members[i].location != -1)
1936 location = var->var->members[i].location;
1937 else
1938 var->var->members[i].location = location;
1939
1940 /* Below we use type instead of interface_type, because interface_type
1941 * is only available when it is a Block. This code also supports
1942 * inputs/outputs that are just structs.
1943 */
1944 const struct glsl_type *member_type =
1945 glsl_get_struct_field(glsl_without_array(var->type->type), i);
1946
1947 location +=
1948 glsl_count_attribute_slots(member_type,
1949 false /* is_gl_vertex_input */);
1950 }
1951 }
1952
1953
1954 static void
1955 vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
1956 struct vtn_type *ptr_type, SpvStorageClass storage_class,
1957 nir_constant *initializer)
1958 {
1959 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1960 struct vtn_type *type = ptr_type->deref;
1961
1962 struct vtn_type *without_array = type;
1963 while(glsl_type_is_array(without_array->type))
1964 without_array = without_array->array_element;
1965
1966 enum vtn_variable_mode mode;
1967 nir_variable_mode nir_mode;
1968 mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);
1969
1970 switch (mode) {
1971 case vtn_variable_mode_ubo:
1972 /* There's no other way to get vtn_variable_mode_ubo */
1973 vtn_assert(without_array->block);
1974 b->shader->info.num_ubos++;
1975 break;
1976 case vtn_variable_mode_ssbo:
1977 if (storage_class == SpvStorageClassStorageBuffer &&
1978 !without_array->block) {
1979 if (b->variable_pointers) {
1980 vtn_fail("Variables in the StorageBuffer storage class must "
1981 "have a struct type with the Block decoration");
1982 } else {
1983 /* If variable pointers are not present, it's still malformed
1984 * SPIR-V but we can parse it and do the right thing anyway.
1985 * Since some of the 8-bit storage tests have bugs in this area,
1986 * just make it a warning for now.
1987 */
1988 vtn_warn("Variables in the StorageBuffer storage class must "
1989 "have a struct type with the Block decoration");
1990 }
1991 }
1992 b->shader->info.num_ssbos++;
1993 break;
1994 case vtn_variable_mode_uniform:
1995 if (glsl_type_is_image(without_array->type))
1996 b->shader->info.num_images++;
1997 else if (glsl_type_is_sampler(without_array->type))
1998 b->shader->info.num_textures++;
1999 break;
2000 case vtn_variable_mode_push_constant:
2001 b->shader->num_uniforms = vtn_type_block_size(b, type);
2002 break;
2003
2004 case vtn_variable_mode_phys_ssbo:
2005 vtn_fail("Cannot create a variable with the "
2006 "PhysicalStorageBufferEXT storage class");
2007 break;
2008
2009 default:
2010 /* No tallying is needed */
2011 break;
2012 }
2013
2014 struct vtn_variable *var = rzalloc(b, struct vtn_variable);
2015 var->type = type;
2016 var->mode = mode;
2017 var->base_location = -1;
2018
2019 vtn_assert(val->value_type == vtn_value_type_pointer);
2020 val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
2021
2022 switch (var->mode) {
2023 case vtn_variable_mode_function:
2024 case vtn_variable_mode_private:
2025 case vtn_variable_mode_uniform:
2026 /* For these, we create the variable normally */
2027 var->var = rzalloc(b->shader, nir_variable);
2028 var->var->name = ralloc_strdup(var->var, val->name);
2029
2030 if (storage_class == SpvStorageClassAtomicCounter) {
2031 /* Need to tweak the NIR type here because at vtn_handle_type we
2032 * don't have access to the storage class, which is what tells us
2033 * that this is an atomic uint.
2034 */
2035 var->var->type = repair_atomic_type(var->type->type);
2036 } else {
2037 /* Private variables don't have any explicit layout but some layouts
2038 * may have leaked through due to type deduplication in the SPIR-V.
2039 */
2040 var->var->type = glsl_get_bare_type(var->type->type);
2041 }
2042 var->var->data.mode = nir_mode;
2043 var->var->data.location = -1;
2044 var->var->interface_type = NULL;
2045 break;
2046
2047 case vtn_variable_mode_workgroup:
2048 if (b->options->lower_workgroup_access_to_offsets) {
2049 var->shared_location = -1;
2050 } else {
2051 /* Create the variable normally */
2052 var->var = rzalloc(b->shader, nir_variable);
2053 var->var->name = ralloc_strdup(var->var, val->name);
2054 /* Workgroup variables don't have any explicit layout but some
2055 * layouts may have leaked through due to type deduplication in the
2056 * SPIR-V.
2057 */
2058 var->var->type = glsl_get_bare_type(var->type->type);
2059 var->var->data.mode = nir_var_mem_shared;
2060 }
2061 break;
2062
2063 case vtn_variable_mode_input:
2064 case vtn_variable_mode_output: {
2065 /* In order to know whether or not we're a per-vertex inout, we need
2066 * the patch qualifier. This means walking the variable decorations
2067 * early before we actually create any variables. Not a big deal.
2068 *
2069 * GLSLang really likes to place decorations in the most interior
2070 * thing it possibly can. In particular, if you have a struct, it
2071 * will place the patch decorations on the struct members. This
2072 * should be handled by the variable splitting below just fine.
2073 *
2074 * If you have an array-of-struct, things get even more weird as it
2075 * will place the patch decorations on the struct even though it's
2076 * inside an array and some of the members being patch and others not
2077 * makes no sense whatsoever. Since the only sensible thing is for
2078 * it to be all or nothing, we'll call it patch if any of the members
2079 * are declared patch.
2080 */
2081 var->patch = false;
2082 vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
2083 if (glsl_type_is_array(var->type->type) &&
2084 glsl_type_is_struct(without_array->type)) {
2085 vtn_foreach_decoration(b, vtn_value(b, without_array->id,
2086 vtn_value_type_type),
2087 var_is_patch_cb, &var->patch);
2088 }
2089
2090 /* For inputs and outputs, we immediately split structures. This
2091 * is for a couple of reasons. For one, builtins may all come in
2092 * a struct and we really want those split out into separate
2093 * variables. For another, interpolation qualifiers can be
2094 * applied to members of the top-level struct and we need to be
2095 * able to preserve that information.
2096 */
2097
2098 struct vtn_type *per_vertex_type = var->type;
2099 if (is_per_vertex_inout(var, b->shader->info.stage)) {
2100 /* In Geometry shaders (and some tessellation), inputs come
2101 * in per-vertex arrays. However, some builtins come in
2102 * non-per-vertex, hence the need for the is_array check. In
2103 * any case, there are no non-builtin arrays allowed so this
2104 * check should be sufficient.
2105 */
2106 per_vertex_type = var->type->array_element;
2107 }
2108
2109 var->var = rzalloc(b->shader, nir_variable);
2110 var->var->name = ralloc_strdup(var->var, val->name);
2111 /* In Vulkan, shader I/O variables don't have any explicit layout but
2112 * some layouts may have leaked through due to type deduplication in
2113 * the SPIR-V. We do, however, keep the layouts in the variable's
2114 * interface_type because we need offsets for XFB arrays of blocks.
2115 */
2116 var->var->type = glsl_get_bare_type(var->type->type);
2117 var->var->data.mode = nir_mode;
2118 var->var->data.patch = var->patch;
2119
2120 /* Figure out the interface block type. */
2121 struct vtn_type *iface_type = per_vertex_type;
2122 if (var->mode == vtn_variable_mode_output &&
2123 (b->shader->info.stage == MESA_SHADER_VERTEX ||
2124 b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
2125 b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
2126 /* For vertex data outputs, we can end up with arrays of blocks for
2127 * transform feedback where each array element corresponds to a
2128 * different XFB output buffer.
2129 */
2130 while (iface_type->base_type == vtn_base_type_array)
2131 iface_type = iface_type->array_element;
2132 }
2133 if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
2134 var->var->interface_type = iface_type->type;
2135
2136 if (per_vertex_type->base_type == vtn_base_type_struct &&
2137 per_vertex_type->block) {
2138 /* It's a struct. Set it up as per-member. */
2139 var->var->num_members = glsl_get_length(per_vertex_type->type);
2140 var->var->members = rzalloc_array(var->var, struct nir_variable_data,
2141 var->var->num_members);
2142
2143 for (unsigned i = 0; i < var->var->num_members; i++) {
2144 var->var->members[i].mode = nir_mode;
2145 var->var->members[i].patch = var->patch;
2146 var->var->members[i].location = -1;
2147 }
2148 }
2149
2150 /* For inputs and outputs, we need to grab locations and builtin
2151 * information from the per-vertex type.
2152 */
2153 vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
2154 vtn_value_type_type),
2155 var_decoration_cb, var);
2156 break;
2157 }
2158
2159 case vtn_variable_mode_ubo:
2160 case vtn_variable_mode_ssbo:
2161 case vtn_variable_mode_push_constant:
2162 case vtn_variable_mode_cross_workgroup:
2163 /* These don't need actual variables. */
2164 break;
2165
2166 case vtn_variable_mode_phys_ssbo:
2167 unreachable("Should have been caught before");
2168 }
2169
2170 if (initializer) {
2171 var->var->constant_initializer =
2172 nir_constant_clone(initializer, var->var);
2173 }
2174
2175 vtn_foreach_decoration(b, val, var_decoration_cb, var);
2176
2177 if ((var->mode == vtn_variable_mode_input ||
2178 var->mode == vtn_variable_mode_output) &&
2179 var->var->members) {
2180 assign_missing_member_locations(var);
2181 }
2182
2183 if (var->mode == vtn_variable_mode_uniform) {
2184 /* XXX: We still need the binding information in the nir_variable
2185 * for these. We should fix that.
2186 */
2187 var->var->data.binding = var->binding;
2188 var->var->data.explicit_binding = var->explicit_binding;
2189 var->var->data.descriptor_set = var->descriptor_set;
2190 var->var->data.index = var->input_attachment_index;
2191 var->var->data.offset = var->offset;
2192
2193 if (glsl_type_is_image(without_array->type))
2194 var->var->data.image.format = without_array->image_format;
2195 }
2196
2197 if (var->mode == vtn_variable_mode_function) {
2198 vtn_assert(var->var != NULL && var->var->members == NULL);
2199 nir_function_impl_add_variable(b->nb.impl, var->var);
2200 } else if (var->var) {
2201 nir_shader_add_variable(b->shader, var->var);
2202 } else {
2203 vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
2204 }
2205 }
2206
2207 static void
2208 vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
2209 struct vtn_type *dst_type,
2210 struct vtn_type *src_type)
2211 {
2212 if (dst_type->id == src_type->id)
2213 return;
2214
2215 if (vtn_types_compatible(b, dst_type, src_type)) {
2216 /* Early versions of GLSLang would re-emit types unnecessarily and you
2217 * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
2218 * mismatched source and destination types.
2219 *
2220 * https://github.com/KhronosGroup/glslang/issues/304
2221 * https://github.com/KhronosGroup/glslang/issues/307
2222 * https://bugs.freedesktop.org/show_bug.cgi?id=104338
2223 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2224 */
2225 vtn_warn("Source and destination types of %s do not have the same "
2226 "ID (but are compatible): %u vs %u",
2227 spirv_op_to_string(opcode), dst_type->id, src_type->id);
2228 return;
2229 }
2230
2231 vtn_fail("Source and destination types of %s do not match: %s vs. %s",
2232 spirv_op_to_string(opcode),
2233 glsl_get_type_name(dst_type->type),
2234 glsl_get_type_name(src_type->type));
2235 }
2236
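/* Resizes a vector to exactly num_components, dropping trailing channels
 * or appending zero channels as needed.
 */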
2237 static nir_ssa_def *
2238 nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
2239 unsigned num_components)
2240 {
2241 if (val->num_components == num_components)
2242 return val;
2243
2244 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
2245 for (unsigned i = 0; i < num_components; i++) {
2246 if (i < val->num_components)
2247 comps[i] = nir_channel(b, val, i);
2248 else
2249 comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
2250 }
2251 return nir_vec(b, comps, num_components);
2252 }
2253
2254 static nir_ssa_def *
2255 nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
2256 const struct glsl_type *type)
2257 {
2258 const unsigned num_components = glsl_get_vector_elements(type);
2259 const unsigned bit_size = glsl_get_bit_size(type);
2260
2261 /* First, zero-pad to ensure that the value is big enough that when we
2262 * bit-cast it, we don't lose anything.
2263 */
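 /* Illustrative example: bit-casting a 3-component 32-bit vector to a
 * 64-bit type first pads to 4 components so nir_bitcast_vector yields
 * two whole 64-bit channels and no source bits are dropped.
 */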
2264 if (val->bit_size < bit_size) {
2265 const unsigned src_num_components_needed =
2266 vtn_align_u32(val->num_components, bit_size / val->bit_size);
2267 val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
2268 }
2269
2270 val = nir_bitcast_vector(b, val, bit_size);
2271
2272 return nir_shrink_zero_pad_vec(b, val, num_components);
2273 }
2274
2275 void
2276 vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
2277 const uint32_t *w, unsigned count)
2278 {
2279 switch (opcode) {
2280 case SpvOpUndef: {
2281 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
2282 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
2283 break;
2284 }
2285
2286 case SpvOpVariable: {
2287 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2288
2289 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
2290
2291 SpvStorageClass storage_class = w[3];
2292 nir_constant *initializer = NULL;
2293 if (count > 4)
2294 initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;
2295
2296 vtn_create_variable(b, val, ptr_type, storage_class, initializer);
2297 break;
2298 }
2299
2300 case SpvOpAccessChain:
2301 case SpvOpPtrAccessChain:
2302 case SpvOpInBoundsAccessChain: {
2303 struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
2304 chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);
2305
2306 unsigned idx = 0;
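 /* Each index operand becomes one access-chain link: constant indices
 * are folded to literal links so struct members can be resolved
 * directly, while anything else stays an SSA id link (e.g. a
 * dynamically computed array index).
 */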
2307 for (int i = 4; i < count; i++) {
2308 struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
2309 if (link_val->value_type == vtn_value_type_constant) {
2310 chain->link[idx].mode = vtn_access_mode_literal;
2311 switch (glsl_get_bit_size(link_val->type->type)) {
2312 case 8:
2313 chain->link[idx].id = link_val->constant->values[0].i8[0];
2314 break;
2315 case 16:
2316 chain->link[idx].id = link_val->constant->values[0].i16[0];
2317 break;
2318 case 32:
2319 chain->link[idx].id = link_val->constant->values[0].i32[0];
2320 break;
2321 case 64:
2322 chain->link[idx].id = link_val->constant->values[0].i64[0];
2323 break;
2324 default:
2325 vtn_fail("Invalid bit size");
2326 }
2327 } else {
2328 chain->link[idx].mode = vtn_access_mode_id;
2329 chain->link[idx].id = w[i];
2330
2331 }
2332 idx++;
2333 }
2334
2335 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2336 struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
2337 if (base_val->value_type == vtn_value_type_sampled_image) {
2338 /* This is rather insane. SPIR-V allows you to use OpSampledImage
2339 * to combine an array of images with a single sampler to get an
2340 * array of sampled images that all share the same sampler.
2341 * Fortunately, this means that we can more-or-less ignore the
2342 * sampler when crawling the access chain, but it does leave us
2343 * with this rather awkward little special-case.
2344 */
2345 struct vtn_value *val =
2346 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
2347 val->sampled_image = ralloc(b, struct vtn_sampled_image);
2348 val->sampled_image->type = base_val->sampled_image->type;
2349 val->sampled_image->image =
2350 vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
2351 val->sampled_image->sampler = base_val->sampled_image->sampler;
2352 } else {
2353 vtn_assert(base_val->value_type == vtn_value_type_pointer);
2354 struct vtn_value *val =
2355 vtn_push_value(b, w[2], vtn_value_type_pointer);
2356 val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
2357 val->pointer->ptr_type = ptr_type;
2358 }
2359 break;
2360 }
2361
2362 case SpvOpCopyMemory: {
2363 struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
2364 struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);
2365
2366 vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);
2367
2368 vtn_variable_copy(b, dest->pointer, src->pointer);
2369 break;
2370 }
2371
2372 case SpvOpLoad: {
2373 struct vtn_type *res_type =
2374 vtn_value(b, w[1], vtn_value_type_type)->type;
2375 struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
2376 struct vtn_pointer *src = src_val->pointer;
2377
2378 vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
2379
2380 if (glsl_type_is_image(res_type->type) ||
2381 glsl_type_is_sampler(res_type->type)) {
2382 vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
2383 return;
2384 }
2385
2386 vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
2387 break;
2388 }
2389
2390 case SpvOpStore: {
2391 struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
2392 struct vtn_pointer *dest = dest_val->pointer;
2393 struct vtn_value *src_val = vtn_untyped_value(b, w[2]);
2394
2395 /* OpStore requires us to actually have a storage type */
2396 vtn_fail_if(dest->type->type == NULL,
2397 "Invalid destination type for OpStore");
2398
2399 if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
2400 glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
2401 /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
2402 * would then store them to a local variable as bool. Work around
2403 * the issue by doing an implicit conversion.
2404 *
2405 * https://github.com/KhronosGroup/glslang/issues/170
2406 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2407 */
2408 vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
2409 "OpTypeBool. Doing an implicit conversion to work around "
2410 "the problem.");
2411 struct vtn_ssa_value *bool_ssa =
2412 vtn_create_ssa_value(b, dest->type->type);
2413 bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
2414 vtn_variable_store(b, bool_ssa, dest);
2415 break;
2416 }
2417
2418 vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
2419
2420 if (glsl_type_is_sampler(dest->type->type)) {
2421 if (b->wa_glslang_179) {
2422 vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
2423 "propagation to work around the problem.");
2424 vtn_assert(dest->var->copy_prop_sampler == NULL);
2425 dest->var->copy_prop_sampler =
2426 vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
2427 } else {
2428 vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
2429 }
2430 break;
2431 }
2432
2433 struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
2434 vtn_variable_store(b, src, dest);
2435 break;
2436 }
2437
2438 case SpvOpArrayLength: {
2439 struct vtn_pointer *ptr =
2440 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2441 const uint32_t field = w[4];
2442
2443 vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
2444 "OpArrayLength must take a pointer to a structure type");
2445 vtn_fail_if(field != ptr->type->length - 1 ||
2446 ptr->type->members[field]->base_type != vtn_base_type_array,
2447 "OpArrayLength must reference the last member of the "
2448 "structure and that must be an array");
2449
2450 const uint32_t offset = ptr->type->offsets[field];
2451 const uint32_t stride = ptr->type->members[field]->stride;
2452
2453 if (!ptr->block_index) {
2454 struct vtn_access_chain chain = {
2455 .length = 0,
2456 };
2457 ptr = vtn_pointer_dereference(b, ptr, &chain);
2458 vtn_assert(ptr->block_index);
2459 }
2460
2461 nir_intrinsic_instr *instr =
2462 nir_intrinsic_instr_create(b->nb.shader,
2463 nir_intrinsic_get_buffer_size);
2464 instr->src[0] = nir_src_for_ssa(ptr->block_index);
2465 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
2466 nir_builder_instr_insert(&b->nb, &instr->instr);
2467 nir_ssa_def *buf_size = &instr->dest.ssa;
2468
2469 /* array_length = max(buffer_size - offset, 0) / stride */
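 /* For example (illustrative numbers): a 256-byte buffer with the
 * runtime array at offset 16 and a 4-byte stride yields
 * (256 - 16) / 4 = 60 elements.
 */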
2470 nir_ssa_def *array_length =
2471 nir_idiv(&b->nb,
2472 nir_imax(&b->nb,
2473 nir_isub(&b->nb,
2474 buf_size,
2475 nir_imm_int(&b->nb, offset)),
2476 nir_imm_int(&b->nb, 0u)),
2477 nir_imm_int(&b->nb, stride));
2478
2479 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2480 val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
2481 val->ssa->def = array_length;
2482 break;
2483 }
2484
2485 case SpvOpConvertPtrToU: {
2486 struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2487
2488 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2489 u_val->type->base_type != vtn_base_type_scalar,
2490 "OpConvertPtrToU can only be used to cast to a vector or "
2491 "scalar type");
2492
2493 /* The pointer will be converted to an SSA value automatically */
2494 nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;
2495
2496 u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
2497 u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
2498 break;
2499 }
2500
2501 case SpvOpConvertUToPtr: {
2502 struct vtn_value *ptr_val =
2503 vtn_push_value(b, w[2], vtn_value_type_pointer);
2504 struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);
2505
2506 vtn_fail_if(ptr_val->type->type == NULL,
2507 "OpConvertUToPtr can only be used on physical pointers");
2508
2509 vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
2510 u_val->type->base_type != vtn_base_type_scalar,
2511 "OpConvertUToPtr can only be used to cast from a vector or "
2512 "scalar type");
2513
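 /* nir_sloppy_bitcast reshapes the integer value to the pointer's
 * storage type (illustratively, a 64-bit device address for
 * PhysicalStorageBufferEXT), and vtn_pointer_from_ssa then rebuilds a
 * vtn_pointer from that value.
 */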
2514 nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
2515 ptr_val->type->type);
2516 ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
2517 break;
2518 }
2519
2520 case SpvOpCopyMemorySized:
2521 default:
2522 vtn_fail("Unhandled opcode");
2523 }
2524 }