spirv: Whack sampler/image pointers to uniform
[mesa.git] / src / compiler / spirv / vtn_variables.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "spirv_info.h"
30 #include "nir_deref.h"
31 #include <vulkan/vulkan_core.h>
32
33 static struct vtn_access_chain *
34 vtn_access_chain_create(struct vtn_builder *b, unsigned length)
35 {
36 struct vtn_access_chain *chain;
37
38 /* Subtract 1 from the length since there's already one built in */
39 size_t size = sizeof(*chain) +
40 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
41 chain = rzalloc_size(b, size);
42 chain->length = length;
43
44 return chain;
45 }
46
47 bool
48 vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
49 struct vtn_pointer *ptr)
50 {
51 return ((ptr->mode == vtn_variable_mode_ubo ||
52 ptr->mode == vtn_variable_mode_ssbo) &&
53 b->options->lower_ubo_ssbo_access_to_offsets) ||
54 ptr->mode == vtn_variable_mode_push_constant ||
55 (ptr->mode == vtn_variable_mode_workgroup &&
56 b->options->lower_workgroup_access_to_offsets);
57 }
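/* This predicate picks between the two pointer lowerings used throughout
 * this file: when it returns false, pointers are realized as NIR deref
 * chains (vtn_nir_deref_pointer_dereference); when it returns true, they
 * are realized as an explicit block index plus byte offset and accessed
 * with offset-based intrinsics (vtn_ssa_offset_pointer_dereference,
 * vtn_pointer_to_offset, _vtn_block_load_store).
 */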
58
59 static bool
60 vtn_pointer_is_external_block(struct vtn_builder *b,
61 struct vtn_pointer *ptr)
62 {
63 return ptr->mode == vtn_variable_mode_ssbo ||
64 ptr->mode == vtn_variable_mode_ubo ||
65 ptr->mode == vtn_variable_mode_push_constant ||
66 (ptr->mode == vtn_variable_mode_workgroup &&
67 b->options->lower_workgroup_access_to_offsets);
68 }
69
70 static nir_ssa_def *
71 vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
72 unsigned stride, unsigned bit_size)
73 {
74 vtn_assert(stride > 0);
75 if (link.mode == vtn_access_mode_literal) {
76 return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
77 } else {
78 nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
79 if (ssa->bit_size != bit_size)
80 ssa = nir_i2i(&b->nb, ssa, bit_size);
81 if (stride != 1)
82 ssa = nir_imul_imm(&b->nb, ssa, stride);
83 return ssa;
84 }
85 }
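/* For example, a literal link with id == 2 and stride == 16 folds to the
 * constant 32, while an SSA-valued link is converted to the requested bit
 * size with nir_i2i (if needed) and multiplied by the stride.
 */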
86
87 static VkDescriptorType
88 vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
89 {
90 switch (mode) {
91 case vtn_variable_mode_ubo:
92 return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
93 case vtn_variable_mode_ssbo:
94 return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
95 default:
96 vtn_fail("Invalid mode for vulkan_resource_index");
97 }
98 }
99
100 static nir_ssa_def *
101 vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
102 nir_ssa_def *desc_array_index)
103 {
104 if (!desc_array_index) {
105 vtn_assert(glsl_type_is_struct(var->type->type));
106 desc_array_index = nir_imm_int(&b->nb, 0);
107 }
108
109 nir_intrinsic_instr *instr =
110 nir_intrinsic_instr_create(b->nb.shader,
111 nir_intrinsic_vulkan_resource_index);
112 instr->src[0] = nir_src_for_ssa(desc_array_index);
113 nir_intrinsic_set_desc_set(instr, var->descriptor_set);
114 nir_intrinsic_set_binding(instr, var->binding);
115 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
116
117 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
118 nir_builder_instr_insert(&b->nb, &instr->instr);
119
120 return &instr->dest.ssa;
121 }
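/* The result is an opaque handle built from the variable's descriptor set,
 * binding, and array index; it is consumed later by vtn_resource_reindex
 * and vtn_descriptor_load.
 */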
122
123 static nir_ssa_def *
124 vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
125 nir_ssa_def *base_index, nir_ssa_def *offset_index)
126 {
127 nir_intrinsic_instr *instr =
128 nir_intrinsic_instr_create(b->nb.shader,
129 nir_intrinsic_vulkan_resource_reindex);
130 instr->src[0] = nir_src_for_ssa(base_index);
131 instr->src[1] = nir_src_for_ssa(offset_index);
132 nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
133
134 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
135 nir_builder_instr_insert(&b->nb, &instr->instr);
136
137 return &instr->dest.ssa;
138 }
139
140 static nir_ssa_def *
141 vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
142 const struct glsl_type *desc_type, nir_ssa_def *desc_index)
143 {
144 nir_intrinsic_instr *desc_load =
145 nir_intrinsic_instr_create(b->nb.shader,
146 nir_intrinsic_load_vulkan_descriptor);
147 desc_load->src[0] = nir_src_for_ssa(desc_index);
148 desc_load->num_components = glsl_get_vector_elements(desc_type);
149 nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
150 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
151 desc_load->num_components,
152 glsl_get_bit_size(desc_type), NULL);
153 nir_builder_instr_insert(&b->nb, &desc_load->instr);
154
155 return &desc_load->dest.ssa;
156 }
157
158 /* Dereference the given base pointer by the access chain */
159 static struct vtn_pointer *
160 vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
161 struct vtn_pointer *base,
162 struct vtn_access_chain *deref_chain)
163 {
164 struct vtn_type *type = base->type;
165 enum gl_access_qualifier access = base->access;
166 unsigned idx = 0;
167
168 nir_deref_instr *tail;
169 if (base->deref) {
170 tail = base->deref;
171 } else if (vtn_pointer_is_external_block(b, base)) {
172 nir_ssa_def *block_index = base->block_index;
173
174       /* We're dereferencing an external block pointer.  Correctness of this
175 * operation relies on one particular line in the SPIR-V spec, section
176 * entitled "Validation Rules for Shader Capabilities":
177 *
178 * "Block and BufferBlock decorations cannot decorate a structure
179 * type that is nested at any level inside another structure type
180 * decorated with Block or BufferBlock."
181 *
182 * This means that we can detect the point where we cross over from
183 * descriptor indexing to buffer indexing by looking for the block
184 * decorated struct type. Anything before the block decorated struct
185 * type is a descriptor indexing operation and anything after the block
186 * decorated struct is a buffer offset operation.
187 */
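      /* For example, for an access chain equivalent to ssbos[i].member[j],
       * where ssbos is an array of Block-decorated structs, the ssbos[i]
       * step is descriptor indexing (vulkan_resource_index /
       * vulkan_resource_reindex below) and everything from the
       * Block-decorated struct inward is handled as a buffer deref built on
       * the loaded descriptor.
       */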
188
189 /* Figure out the descriptor array index if any
190 *
191 * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
192 * to forget the Block or BufferBlock decoration from time to time.
193 * It's more robust if we check for both !block_index and for the type
194 * to contain a block. This way there's a decent chance that arrays of
195 * UBOs/SSBOs will work correctly even if variable pointers are
196        * completely toast.
197 */
198 nir_ssa_def *desc_arr_idx = NULL;
199 if (!block_index || vtn_type_contains_block(b, type)) {
200 /* If our type contains a block, then we're still outside the block
201 * and we need to process enough levels of dereferences to get inside
202 * of it.
203 */
204 if (deref_chain->ptr_as_array) {
205 unsigned aoa_size = glsl_get_aoa_size(type->type);
206 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
207 MAX2(aoa_size, 1), 32);
208 idx++;
209 }
210
211 for (; idx < deref_chain->length; idx++) {
212 if (type->base_type != vtn_base_type_array) {
213 vtn_assert(type->base_type == vtn_base_type_struct);
214 break;
215 }
216
217 unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
218 nir_ssa_def *arr_offset =
219 vtn_access_link_as_ssa(b, deref_chain->link[idx],
220 MAX2(aoa_size, 1), 32);
221 if (desc_arr_idx)
222 desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
223 else
224 desc_arr_idx = arr_offset;
225
226 type = type->array_element;
227 access |= type->access;
228 }
229 }
230
231 if (!block_index) {
232 vtn_assert(base->var && base->type);
233 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
234 } else if (desc_arr_idx) {
235 block_index = vtn_resource_reindex(b, base->mode,
236 block_index, desc_arr_idx);
237 }
238
239 if (idx == deref_chain->length) {
240 /* The entire deref was consumed in finding the block index. Return
241        * a pointer that just has a block index; a later access chain
242 * will dereference deeper.
243 */
244 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
245 ptr->mode = base->mode;
246 ptr->type = type;
247 ptr->block_index = block_index;
248 ptr->access = access;
249 return ptr;
250 }
251
252 /* If we got here, there's more access chain to handle and we have the
253 * final block index. Insert a descriptor load and cast to a deref to
254 * start the deref chain.
255 */
256 nir_ssa_def *desc =
257 vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);
258
259 assert(base->mode == vtn_variable_mode_ssbo ||
260 base->mode == vtn_variable_mode_ubo);
261 nir_variable_mode nir_mode =
262 base->mode == vtn_variable_mode_ssbo ? nir_var_ssbo : nir_var_ubo;
263
264 tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
265 base->ptr_type->stride);
266 } else {
267 assert(base->var && base->var->var);
268 tail = nir_build_deref_var(&b->nb, base->var->var);
269 if (base->ptr_type && base->ptr_type->type) {
270 tail->dest.ssa.num_components =
271 glsl_get_vector_elements(base->ptr_type->type);
272 tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
273 }
274 }
275
276 if (idx == 0 && deref_chain->ptr_as_array) {
277 /* We start with a deref cast to get the stride. Hopefully, we'll be
278 * able to delete that cast eventually.
279 */
280 tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
281 tail->type, base->ptr_type->stride);
282
283 nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
284 tail->dest.ssa.bit_size);
285 tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
286 idx++;
287 }
288
289 for (; idx < deref_chain->length; idx++) {
290 if (glsl_type_is_struct(type->type)) {
291 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
292 unsigned field = deref_chain->link[idx].id;
293 tail = nir_build_deref_struct(&b->nb, tail, field);
294 type = type->members[field];
295 } else {
296 nir_ssa_def *arr_index =
297 vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
298 tail->dest.ssa.bit_size);
299 tail = nir_build_deref_array(&b->nb, tail, arr_index);
300 type = type->array_element;
301 }
302
303 access |= type->access;
304 }
305
306 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
307 ptr->mode = base->mode;
308 ptr->type = type;
309 ptr->var = base->var;
310 ptr->deref = tail;
311 ptr->access = access;
312
313 return ptr;
314 }
315
316 static struct vtn_pointer *
317 vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
318 struct vtn_pointer *base,
319 struct vtn_access_chain *deref_chain)
320 {
321 nir_ssa_def *block_index = base->block_index;
322 nir_ssa_def *offset = base->offset;
323 struct vtn_type *type = base->type;
324 enum gl_access_qualifier access = base->access;
325
326 unsigned idx = 0;
327 if (base->mode == vtn_variable_mode_ubo ||
328 base->mode == vtn_variable_mode_ssbo) {
329 if (!block_index) {
330 vtn_assert(base->var && base->type);
331 nir_ssa_def *desc_arr_idx;
332 if (glsl_type_is_array(type->type)) {
333 if (deref_chain->length >= 1) {
334 desc_arr_idx =
335 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
336 idx++;
337 /* This consumes a level of type */
338 type = type->array_element;
339 access |= type->access;
340 } else {
341 /* This is annoying. We've been asked for a pointer to the
342              * array of UBOs/SSBOs and not a specific buffer.  Return a
343 * pointer with a descriptor index of 0 and we'll have to do
344 * a reindex later to adjust it to the right thing.
345 */
346 desc_arr_idx = nir_imm_int(&b->nb, 0);
347 }
348 } else if (deref_chain->ptr_as_array) {
349 /* You can't have a zero-length OpPtrAccessChain */
350 vtn_assert(deref_chain->length >= 1);
351 desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
352 } else {
353            /* We have a regular non-array UBO or SSBO. */
354 desc_arr_idx = NULL;
355 }
356 block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
357 } else if (deref_chain->ptr_as_array &&
358 type->base_type == vtn_base_type_struct && type->block) {
359 /* We are doing an OpPtrAccessChain on a pointer to a struct that is
360 * decorated block. This is an interesting corner in the SPIR-V
361        * spec.  One interpretation would be that the client is clearly
362 * trying to treat that block as if it's an implicit array of blocks
363 * repeated in the buffer. However, the SPIR-V spec for the
364 * OpPtrAccessChain says:
365 *
366 * "Base is treated as the address of the first element of an
367 * array, and the Element element’s address is computed to be the
368 * base for the Indexes, as per OpAccessChain."
369 *
370 * Taken literally, that would mean that your struct type is supposed
371 * to be treated as an array of such a struct and, since it's
372 * decorated block, that means an array of blocks which corresponds
373 * to an array descriptor. Therefore, we need to do a reindex
374 * operation to add the index from the first link in the access chain
375        * to the index we received.
376 *
377 * The downside to this interpretation (there always is one) is that
378 * this might be somewhat surprising behavior to apps if they expect
379 * the implicit array behavior described above.
380 */
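      /* Concretely, an OpPtrAccessChain with indices (2, 1) on such a
       * pointer reindexes the descriptor by 2 (selecting the third block in
       * the binding's array) and then treats the remaining 1 as an ordinary
       * member or array index inside that block.
       */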
381 vtn_assert(deref_chain->length >= 1);
382 nir_ssa_def *offset_index =
383 vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
384 idx++;
385
386 block_index = vtn_resource_reindex(b, base->mode,
387 block_index, offset_index);
388 }
389 }
390
391 if (!offset) {
392 if (base->mode == vtn_variable_mode_workgroup) {
393          /* SLM neither needs nor has a block index */
394 vtn_assert(!block_index);
395
396 /* We need the variable for the base offset */
397 vtn_assert(base->var);
398
399 /* We need ptr_type for size and alignment */
400 vtn_assert(base->ptr_type);
401
402 /* Assign location on first use so that we don't end up bloating SLM
403 * address space for variables which are never statically used.
404 */
405 if (base->var->shared_location < 0) {
406 vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
407 b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
408 base->ptr_type->align);
409 base->var->shared_location = b->shader->num_shared;
410 b->shader->num_shared += base->ptr_type->length;
411 }
412
413 offset = nir_imm_int(&b->nb, base->var->shared_location);
414 } else if (base->mode == vtn_variable_mode_push_constant) {
415 /* Push constants neither need nor have a block index */
416 vtn_assert(!block_index);
417
418          /* Start off at the start of the push constant block. */
419 offset = nir_imm_int(&b->nb, 0);
420 } else {
421 /* The code above should have ensured a block_index when needed. */
422 vtn_assert(block_index);
423
424          /* Start off at the start of the buffer. */
425 offset = nir_imm_int(&b->nb, 0);
426 }
427 }
428
429 if (deref_chain->ptr_as_array && idx == 0) {
430 /* We need ptr_type for the stride */
431 vtn_assert(base->ptr_type);
432
433 /* We need at least one element in the chain */
434 vtn_assert(deref_chain->length >= 1);
435
436 nir_ssa_def *elem_offset =
437 vtn_access_link_as_ssa(b, deref_chain->link[idx],
438 base->ptr_type->stride, offset->bit_size);
439 offset = nir_iadd(&b->nb, offset, elem_offset);
440 idx++;
441 }
442
443 for (; idx < deref_chain->length; idx++) {
444 switch (glsl_get_base_type(type->type)) {
445 case GLSL_TYPE_UINT:
446 case GLSL_TYPE_INT:
447 case GLSL_TYPE_UINT16:
448 case GLSL_TYPE_INT16:
449 case GLSL_TYPE_UINT8:
450 case GLSL_TYPE_INT8:
451 case GLSL_TYPE_UINT64:
452 case GLSL_TYPE_INT64:
453 case GLSL_TYPE_FLOAT:
454 case GLSL_TYPE_FLOAT16:
455 case GLSL_TYPE_DOUBLE:
456 case GLSL_TYPE_BOOL:
457 case GLSL_TYPE_ARRAY: {
458 nir_ssa_def *elem_offset =
459 vtn_access_link_as_ssa(b, deref_chain->link[idx],
460 type->stride, offset->bit_size);
461 offset = nir_iadd(&b->nb, offset, elem_offset);
462 type = type->array_element;
463 access |= type->access;
464 break;
465 }
466
467 case GLSL_TYPE_STRUCT: {
468 vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
469 unsigned member = deref_chain->link[idx].id;
470 offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
471 type = type->members[member];
472 access |= type->access;
473 break;
474 }
475
476 default:
477 vtn_fail("Invalid type for deref");
478 }
479 }
480
481 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
482 ptr->mode = base->mode;
483 ptr->type = type;
484 ptr->block_index = block_index;
485 ptr->offset = offset;
486 ptr->access = access;
487
488 return ptr;
489 }
490
491 /* Dereference the given base pointer by the access chain */
492 static struct vtn_pointer *
493 vtn_pointer_dereference(struct vtn_builder *b,
494 struct vtn_pointer *base,
495 struct vtn_access_chain *deref_chain)
496 {
497 if (vtn_pointer_uses_ssa_offset(b, base)) {
498 return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
499 } else {
500 return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
501 }
502 }
503
504 struct vtn_pointer *
505 vtn_pointer_for_variable(struct vtn_builder *b,
506 struct vtn_variable *var, struct vtn_type *ptr_type)
507 {
508 struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);
509
510 pointer->mode = var->mode;
511 pointer->type = var->type;
512 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
513 vtn_assert(ptr_type->deref->type == var->type->type);
514 pointer->ptr_type = ptr_type;
515 pointer->var = var;
516 pointer->access = var->access | var->type->access;
517
518 return pointer;
519 }
520
521 /* Returns an atomic_uint type based on the original uint type. The returned
522 * type will be equivalent to the original one but will have an atomic_uint
523  * type as leaf instead of a uint.
524  *
525  * Handles uint scalars, arrays, and arrays of arrays of any nesting depth.
526 */
527 static const struct glsl_type *
528 repair_atomic_type(const struct glsl_type *type)
529 {
530 assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
531 assert(glsl_type_is_scalar(glsl_without_array(type)));
532
533 if (glsl_type_is_array(type)) {
534 const struct glsl_type *atomic =
535 repair_atomic_type(glsl_get_array_element(type));
536
537 return glsl_array_type(atomic, glsl_get_length(type),
538 glsl_get_explicit_stride(type));
539 } else {
540 return glsl_atomic_uint_type();
541 }
542 }
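/* For example, a uint[4][2] input type is rebuilt as atomic_uint[4][2],
 * preserving the original array lengths and explicit strides.
 */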
543
544 nir_deref_instr *
545 vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
546 {
547 /* Do on-the-fly copy propagation for samplers. */
548 if (ptr->var && ptr->var->copy_prop_sampler)
549 return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
550
551 vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
552 if (!ptr->deref) {
553 struct vtn_access_chain chain = {
554 .length = 0,
555 };
556 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
557 }
558
559 return ptr->deref;
560 }
561
562 static void
563 _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
564 struct vtn_ssa_value *inout)
565 {
566 if (glsl_type_is_vector_or_scalar(deref->type)) {
567 if (load) {
568 inout->def = nir_load_deref(&b->nb, deref);
569 } else {
570 nir_store_deref(&b->nb, deref, inout->def, ~0);
571 }
572 } else if (glsl_type_is_array(deref->type) ||
573 glsl_type_is_matrix(deref->type)) {
574 unsigned elems = glsl_get_length(deref->type);
575 for (unsigned i = 0; i < elems; i++) {
576 nir_deref_instr *child =
577 nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
578 _vtn_local_load_store(b, load, child, inout->elems[i]);
579 }
580 } else {
581 vtn_assert(glsl_type_is_struct(deref->type));
582 unsigned elems = glsl_get_length(deref->type);
583 for (unsigned i = 0; i < elems; i++) {
584 nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
585 _vtn_local_load_store(b, load, child, inout->elems[i]);
586 }
587 }
588 }
589
590 nir_deref_instr *
591 vtn_nir_deref(struct vtn_builder *b, uint32_t id)
592 {
593 struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
594 return vtn_pointer_to_deref(b, ptr);
595 }
596
597 /*
598  * Gets the NIR-level deref tail.  Since OpAccessChain supports per-component
599  * indexing into vectors in SPIR-V, the deref handed to us may end in an array
600  * deref that selects a single component; the tail returned here stops just above it.
601 */
602 static nir_deref_instr *
603 get_deref_tail(nir_deref_instr *deref)
604 {
605 if (deref->deref_type != nir_deref_type_array)
606 return deref;
607
608 nir_deref_instr *parent =
609 nir_instr_as_deref(deref->parent.ssa->parent_instr);
610
611 if (glsl_type_is_vector(parent->type))
612 return parent;
613 else
614 return deref;
615 }
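/* For example, for an access chain equivalent to indexing component 2 of a
 * vec4 variable, the innermost deref is an array deref into the vector;
 * this returns the vec4 deref instead so callers can load or store the
 * whole vector and then extract or insert the single component.
 */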
616
617 struct vtn_ssa_value *
618 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
619 {
620 nir_deref_instr *src_tail = get_deref_tail(src);
621 struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
622 _vtn_local_load_store(b, true, src_tail, val);
623
624 if (src_tail != src) {
625 val->type = src->type;
626 if (nir_src_is_const(src->arr.index))
627 val->def = vtn_vector_extract(b, val->def,
628 nir_src_as_uint(src->arr.index));
629 else
630 val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
631 }
632
633 return val;
634 }
635
636 void
637 vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
638 nir_deref_instr *dest)
639 {
640 nir_deref_instr *dest_tail = get_deref_tail(dest);
641
642 if (dest_tail != dest) {
643 struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
644 _vtn_local_load_store(b, true, dest_tail, val);
645
646 if (nir_src_is_const(dest->arr.index))
647 val->def = vtn_vector_insert(b, val->def, src->def,
648 nir_src_as_uint(dest->arr.index));
649 else
650 val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
651 dest->arr.index.ssa);
652 _vtn_local_load_store(b, false, dest_tail, val);
653 } else {
654 _vtn_local_load_store(b, false, dest_tail, src);
655 }
656 }
657
658 nir_ssa_def *
659 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
660 nir_ssa_def **index_out)
661 {
662 assert(vtn_pointer_uses_ssa_offset(b, ptr));
663 if (!ptr->offset) {
664 struct vtn_access_chain chain = {
665 .length = 0,
666 };
667 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
668 }
669 *index_out = ptr->block_index;
670 return ptr->offset;
671 }
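/* The returned offset is in bytes.  For UBO/SSBO pointers *index_out also
 * receives the block index; workgroup and push-constant pointers have no
 * block index, so it is left NULL.
 */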
672
673 /* Tries to compute the size of an interface block based on the strides and
674 * offsets that are provided to us in the SPIR-V source.
675 */
676 static unsigned
677 vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
678 {
679 enum glsl_base_type base_type = glsl_get_base_type(type->type);
680 switch (base_type) {
681 case GLSL_TYPE_UINT:
682 case GLSL_TYPE_INT:
683 case GLSL_TYPE_UINT16:
684 case GLSL_TYPE_INT16:
685 case GLSL_TYPE_UINT8:
686 case GLSL_TYPE_INT8:
687 case GLSL_TYPE_UINT64:
688 case GLSL_TYPE_INT64:
689 case GLSL_TYPE_FLOAT:
690 case GLSL_TYPE_FLOAT16:
691 case GLSL_TYPE_BOOL:
692 case GLSL_TYPE_DOUBLE: {
693 unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
694 glsl_get_matrix_columns(type->type);
695 if (cols > 1) {
696 vtn_assert(type->stride > 0);
697 return type->stride * cols;
698 } else {
699 unsigned type_size = glsl_get_bit_size(type->type) / 8;
700 return glsl_get_vector_elements(type->type) * type_size;
701 }
702 }
703
704 case GLSL_TYPE_STRUCT:
705 case GLSL_TYPE_INTERFACE: {
706 unsigned size = 0;
707 unsigned num_fields = glsl_get_length(type->type);
708 for (unsigned f = 0; f < num_fields; f++) {
709 unsigned field_end = type->offsets[f] +
710 vtn_type_block_size(b, type->members[f]);
711 size = MAX2(size, field_end);
712 }
713 return size;
714 }
715
716 case GLSL_TYPE_ARRAY:
717 vtn_assert(type->stride > 0);
718 vtn_assert(glsl_get_length(type->type) > 0);
719 return type->stride * glsl_get_length(type->type);
720
721 default:
722 vtn_fail("Invalid block type");
723 return 0;
724 }
725 }
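/* For example, a column-major mat4 with a 16-byte matrix stride is
 * 4 * 16 = 64 bytes, and a struct's size is the maximum over its members
 * of offset + member size.
 */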
726
727 static void
728 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
729 nir_ssa_def *index, nir_ssa_def *offset,
730 unsigned access_offset, unsigned access_size,
731 struct vtn_ssa_value **inout, const struct glsl_type *type,
732 enum gl_access_qualifier access)
733 {
734 nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
735 instr->num_components = glsl_get_vector_elements(type);
736
737 /* Booleans usually shouldn't show up in external memory in SPIR-V.
738 * However, they do for certain older GLSLang versions and can for shared
739 * memory when we lower access chains internally.
740 */
741 const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
742 glsl_get_bit_size(type);
743
744 int src = 0;
745 if (!load) {
746 nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
747 instr->src[src++] = nir_src_for_ssa((*inout)->def);
748 }
749
750 if (op == nir_intrinsic_load_push_constant) {
751 nir_intrinsic_set_base(instr, access_offset);
752 nir_intrinsic_set_range(instr, access_size);
753 }
754
755 if (op == nir_intrinsic_load_ssbo ||
756 op == nir_intrinsic_store_ssbo) {
757 nir_intrinsic_set_access(instr, access);
758 }
759
760 /* With extensions like relaxed_block_layout, we really can't guarantee
761 * much more than scalar alignment.
762 */
763 if (op != nir_intrinsic_load_push_constant)
764 nir_intrinsic_set_align(instr, data_bit_size / 8, 0);
765
766 if (index)
767 instr->src[src++] = nir_src_for_ssa(index);
768
769 if (op == nir_intrinsic_load_push_constant) {
770 /* We need to subtract the offset from where the intrinsic will load the
771 * data. */
772 instr->src[src++] =
773 nir_src_for_ssa(nir_isub(&b->nb, offset,
774 nir_imm_int(&b->nb, access_offset)));
775 } else {
776 instr->src[src++] = nir_src_for_ssa(offset);
777 }
778
779 if (load) {
780 nir_ssa_dest_init(&instr->instr, &instr->dest,
781 instr->num_components, data_bit_size, NULL);
782 (*inout)->def = &instr->dest.ssa;
783 }
784
785 nir_builder_instr_insert(&b->nb, &instr->instr);
786
787 if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
788 (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
789 }
790
791 static void
792 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
793 nir_ssa_def *index, nir_ssa_def *offset,
794 unsigned access_offset, unsigned access_size,
795 struct vtn_type *type, enum gl_access_qualifier access,
796 struct vtn_ssa_value **inout)
797 {
798 if (load && *inout == NULL)
799 *inout = vtn_create_ssa_value(b, type->type);
800
801 enum glsl_base_type base_type = glsl_get_base_type(type->type);
802 switch (base_type) {
803 case GLSL_TYPE_UINT:
804 case GLSL_TYPE_INT:
805 case GLSL_TYPE_UINT16:
806 case GLSL_TYPE_INT16:
807 case GLSL_TYPE_UINT8:
808 case GLSL_TYPE_INT8:
809 case GLSL_TYPE_UINT64:
810 case GLSL_TYPE_INT64:
811 case GLSL_TYPE_FLOAT:
812 case GLSL_TYPE_FLOAT16:
813 case GLSL_TYPE_DOUBLE:
814 case GLSL_TYPE_BOOL:
815 /* This is where things get interesting. At this point, we've hit
816 * a vector, a scalar, or a matrix.
817 */
818 if (glsl_type_is_matrix(type->type)) {
819 /* Loading the whole matrix */
820 struct vtn_ssa_value *transpose;
821 unsigned num_ops, vec_width, col_stride;
822 if (type->row_major) {
823 num_ops = glsl_get_vector_elements(type->type);
824 vec_width = glsl_get_matrix_columns(type->type);
825 col_stride = type->array_element->stride;
826 if (load) {
827 const struct glsl_type *transpose_type =
828 glsl_matrix_type(base_type, vec_width, num_ops);
829 *inout = vtn_create_ssa_value(b, transpose_type);
830 } else {
831 transpose = vtn_ssa_transpose(b, *inout);
832 inout = &transpose;
833 }
834 } else {
835 num_ops = glsl_get_matrix_columns(type->type);
836 vec_width = glsl_get_vector_elements(type->type);
837 col_stride = type->stride;
838 }
839
840 for (unsigned i = 0; i < num_ops; i++) {
841 nir_ssa_def *elem_offset =
842 nir_iadd_imm(&b->nb, offset, i * col_stride);
843 _vtn_load_store_tail(b, op, load, index, elem_offset,
844 access_offset, access_size,
845 &(*inout)->elems[i],
846 glsl_vector_type(base_type, vec_width),
847 type->access | access);
848 }
849
850 if (load && type->row_major)
851 *inout = vtn_ssa_transpose(b, *inout);
852 } else {
853 unsigned elems = glsl_get_vector_elements(type->type);
854 unsigned type_size = glsl_get_bit_size(type->type) / 8;
855 if (elems == 1 || type->stride == type_size) {
856 /* This is a tightly-packed normal scalar or vector load */
857 vtn_assert(glsl_type_is_vector_or_scalar(type->type));
858 _vtn_load_store_tail(b, op, load, index, offset,
859 access_offset, access_size,
860 inout, type->type,
861 type->access | access);
862 } else {
863 /* This is a strided load. We have to load N things separately.
864            * This handles, for example, a single column of a row-major matrix.
865 */
866 vtn_assert(type->stride > type_size);
867 vtn_assert(type->stride % type_size == 0);
868
869 nir_ssa_def *per_comp[4];
870 for (unsigned i = 0; i < elems; i++) {
871 nir_ssa_def *elem_offset =
872 nir_iadd_imm(&b->nb, offset, i * type->stride);
873 struct vtn_ssa_value *comp, temp_val;
874 if (!load) {
875 temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
876 temp_val.type = glsl_scalar_type(base_type);
877 }
878 comp = &temp_val;
879 _vtn_load_store_tail(b, op, load, index, elem_offset,
880 access_offset, access_size,
881 &comp, glsl_scalar_type(base_type),
882 type->access | access);
883 per_comp[i] = comp->def;
884 }
885
886 if (load) {
887 if (*inout == NULL)
888 *inout = vtn_create_ssa_value(b, type->type);
889 (*inout)->def = nir_vec(&b->nb, per_comp, elems);
890 }
891 }
892 }
893 return;
894
895 case GLSL_TYPE_ARRAY: {
896 unsigned elems = glsl_get_length(type->type);
897 for (unsigned i = 0; i < elems; i++) {
898 nir_ssa_def *elem_off =
899 nir_iadd_imm(&b->nb, offset, i * type->stride);
900 _vtn_block_load_store(b, op, load, index, elem_off,
901 access_offset, access_size,
902 type->array_element,
903 type->array_element->access | access,
904 &(*inout)->elems[i]);
905 }
906 return;
907 }
908
909 case GLSL_TYPE_STRUCT: {
910 unsigned elems = glsl_get_length(type->type);
911 for (unsigned i = 0; i < elems; i++) {
912 nir_ssa_def *elem_off =
913 nir_iadd_imm(&b->nb, offset, type->offsets[i]);
914 _vtn_block_load_store(b, op, load, index, elem_off,
915 access_offset, access_size,
916 type->members[i],
917 type->members[i]->access | access,
918 &(*inout)->elems[i]);
919 }
920 return;
921 }
922
923 default:
924 vtn_fail("Invalid block member type");
925 }
926 }
927
928 static struct vtn_ssa_value *
929 vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
930 {
931 nir_intrinsic_op op;
932 unsigned access_offset = 0, access_size = 0;
933 switch (src->mode) {
934 case vtn_variable_mode_ubo:
935 op = nir_intrinsic_load_ubo;
936 break;
937 case vtn_variable_mode_ssbo:
938 op = nir_intrinsic_load_ssbo;
939 break;
940 case vtn_variable_mode_push_constant:
941 op = nir_intrinsic_load_push_constant;
942 access_size = b->shader->num_uniforms;
943 break;
944 case vtn_variable_mode_workgroup:
945 op = nir_intrinsic_load_shared;
946 break;
947 default:
948 vtn_fail("Invalid block variable mode");
949 }
950
951 nir_ssa_def *offset, *index = NULL;
952 offset = vtn_pointer_to_offset(b, src, &index);
953
954 struct vtn_ssa_value *value = NULL;
955 _vtn_block_load_store(b, op, true, index, offset,
956 access_offset, access_size,
957 src->type, src->access, &value);
958 return value;
959 }
960
961 static void
962 vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
963 struct vtn_pointer *dst)
964 {
965 nir_intrinsic_op op;
966 switch (dst->mode) {
967 case vtn_variable_mode_ssbo:
968 op = nir_intrinsic_store_ssbo;
969 break;
970 case vtn_variable_mode_workgroup:
971 op = nir_intrinsic_store_shared;
972 break;
973 default:
974 vtn_fail("Invalid block variable mode");
975 }
976
977 nir_ssa_def *offset, *index = NULL;
978 offset = vtn_pointer_to_offset(b, dst, &index);
979
980 _vtn_block_load_store(b, op, false, index, offset,
981 0, 0, dst->type, dst->access, &src);
982 }
983
984 static void
985 _vtn_variable_load_store(struct vtn_builder *b, bool load,
986 struct vtn_pointer *ptr,
987 struct vtn_ssa_value **inout)
988 {
989 enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
990 switch (base_type) {
991 case GLSL_TYPE_UINT:
992 case GLSL_TYPE_INT:
993 case GLSL_TYPE_UINT16:
994 case GLSL_TYPE_INT16:
995 case GLSL_TYPE_UINT8:
996 case GLSL_TYPE_INT8:
997 case GLSL_TYPE_UINT64:
998 case GLSL_TYPE_INT64:
999 case GLSL_TYPE_FLOAT:
1000 case GLSL_TYPE_FLOAT16:
1001 case GLSL_TYPE_BOOL:
1002 case GLSL_TYPE_DOUBLE:
1003 if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
1004 /* We hit a vector or scalar; go ahead and emit the load[s] */
1005 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
1006 if (vtn_pointer_is_external_block(b, ptr)) {
1007 /* If it's external, we call nir_load/store_deref directly. The
1008 * vtn_local_load/store helpers are too clever and do magic to
1009 * avoid array derefs of vectors. That magic is both less
1010 * efficient than the direct load/store and, in the case of
1011 * stores, is broken because it creates a race condition if two
1012 * threads are writing to different components of the same vector
1013 * due to the load+insert+store it uses to emulate the array
1014 * deref.
1015 */
1016 if (load) {
1017 *inout = vtn_create_ssa_value(b, ptr->type->type);
1018 (*inout)->def = nir_load_deref(&b->nb, deref);
1019 } else {
1020 nir_store_deref(&b->nb, deref, (*inout)->def, ~0);
1021 }
1022 } else {
1023 if (load) {
1024 *inout = vtn_local_load(b, deref);
1025 } else {
1026 vtn_local_store(b, *inout, deref);
1027 }
1028 }
1029 return;
1030 }
1031 /* Fall through */
1032
1033 case GLSL_TYPE_ARRAY:
1034 case GLSL_TYPE_STRUCT: {
1035 unsigned elems = glsl_get_length(ptr->type->type);
1036 if (load) {
1037 vtn_assert(*inout == NULL);
1038 *inout = rzalloc(b, struct vtn_ssa_value);
1039 (*inout)->type = ptr->type->type;
1040 (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
1041 }
1042
1043 struct vtn_access_chain chain = {
1044 .length = 1,
1045 .link = {
1046 { .mode = vtn_access_mode_literal, },
1047 }
1048 };
1049 for (unsigned i = 0; i < elems; i++) {
1050 chain.link[0].id = i;
1051 struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
1052 _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
1053 }
1054 return;
1055 }
1056
1057 default:
1058 vtn_fail("Invalid access chain type");
1059 }
1060 }
1061
1062 struct vtn_ssa_value *
1063 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
1064 {
1065 if (vtn_pointer_uses_ssa_offset(b, src)) {
1066 return vtn_block_load(b, src);
1067 } else {
1068 struct vtn_ssa_value *val = NULL;
1069 _vtn_variable_load_store(b, true, src, &val);
1070 return val;
1071 }
1072 }
1073
1074 void
1075 vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
1076 struct vtn_pointer *dest)
1077 {
1078 if (vtn_pointer_uses_ssa_offset(b, dest)) {
1079 vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
1080 dest->mode == vtn_variable_mode_workgroup);
1081 vtn_block_store(b, src, dest);
1082 } else {
1083 _vtn_variable_load_store(b, false, dest, &src);
1084 }
1085 }
1086
1087 static void
1088 _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1089 struct vtn_pointer *src)
1090 {
1091 vtn_assert(src->type->type == dest->type->type);
1092 enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
1093 switch (base_type) {
1094 case GLSL_TYPE_UINT:
1095 case GLSL_TYPE_INT:
1096 case GLSL_TYPE_UINT16:
1097 case GLSL_TYPE_INT16:
1098 case GLSL_TYPE_UINT8:
1099 case GLSL_TYPE_INT8:
1100 case GLSL_TYPE_UINT64:
1101 case GLSL_TYPE_INT64:
1102 case GLSL_TYPE_FLOAT:
1103 case GLSL_TYPE_FLOAT16:
1104 case GLSL_TYPE_DOUBLE:
1105 case GLSL_TYPE_BOOL:
1106 /* At this point, we have a scalar, vector, or matrix so we know that
1107 * there cannot be any structure splitting still in the way. By
1108 * stopping at the matrix level rather than the vector level, we
1109 * ensure that matrices get loaded in the optimal way even if they
1110       * are stored row-major in a UBO.
1111 */
1112 vtn_variable_store(b, vtn_variable_load(b, src), dest);
1113 return;
1114
1115 case GLSL_TYPE_ARRAY:
1116 case GLSL_TYPE_STRUCT: {
1117 struct vtn_access_chain chain = {
1118 .length = 1,
1119 .link = {
1120 { .mode = vtn_access_mode_literal, },
1121 }
1122 };
1123 unsigned elems = glsl_get_length(src->type->type);
1124 for (unsigned i = 0; i < elems; i++) {
1125 chain.link[0].id = i;
1126 struct vtn_pointer *src_elem =
1127 vtn_pointer_dereference(b, src, &chain);
1128 struct vtn_pointer *dest_elem =
1129 vtn_pointer_dereference(b, dest, &chain);
1130
1131 _vtn_variable_copy(b, dest_elem, src_elem);
1132 }
1133 return;
1134 }
1135
1136 default:
1137 vtn_fail("Invalid access chain type");
1138 }
1139 }
1140
1141 static void
1142 vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
1143 struct vtn_pointer *src)
1144 {
1145    /* TODO: At some point, we should add a special case for when we can
1146 * just emit a copy_var intrinsic.
1147 */
1148 _vtn_variable_copy(b, dest, src);
1149 }
1150
1151 static void
1152 set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
1153 {
1154 vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
1155 *mode = nir_var_system_value;
1156 }
1157
1158 static void
1159 vtn_get_builtin_location(struct vtn_builder *b,
1160 SpvBuiltIn builtin, int *location,
1161 nir_variable_mode *mode)
1162 {
1163 switch (builtin) {
1164 case SpvBuiltInPosition:
1165 *location = VARYING_SLOT_POS;
1166 break;
1167 case SpvBuiltInPointSize:
1168 *location = VARYING_SLOT_PSIZ;
1169 break;
1170 case SpvBuiltInClipDistance:
1171 *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
1172 break;
1173 case SpvBuiltInCullDistance:
1174 *location = VARYING_SLOT_CULL_DIST0;
1175 break;
1176 case SpvBuiltInVertexId:
1177 case SpvBuiltInVertexIndex:
1178 /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
1179 * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
1180 * same as gl_VertexID, which is non-zero-based, and removes
1181 * VertexIndex. Since they're both defined to be non-zero-based, we use
1182 * SYSTEM_VALUE_VERTEX_ID for both.
1183 */
1184 *location = SYSTEM_VALUE_VERTEX_ID;
1185 set_mode_system_value(b, mode);
1186 break;
1187 case SpvBuiltInInstanceIndex:
1188 *location = SYSTEM_VALUE_INSTANCE_INDEX;
1189 set_mode_system_value(b, mode);
1190 break;
1191 case SpvBuiltInInstanceId:
1192 *location = SYSTEM_VALUE_INSTANCE_ID;
1193 set_mode_system_value(b, mode);
1194 break;
1195 case SpvBuiltInPrimitiveId:
1196 if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
1197 vtn_assert(*mode == nir_var_shader_in);
1198 *location = VARYING_SLOT_PRIMITIVE_ID;
1199 } else if (*mode == nir_var_shader_out) {
1200 *location = VARYING_SLOT_PRIMITIVE_ID;
1201 } else {
1202 *location = SYSTEM_VALUE_PRIMITIVE_ID;
1203 set_mode_system_value(b, mode);
1204 }
1205 break;
1206 case SpvBuiltInInvocationId:
1207 *location = SYSTEM_VALUE_INVOCATION_ID;
1208 set_mode_system_value(b, mode);
1209 break;
1210 case SpvBuiltInLayer:
1211 *location = VARYING_SLOT_LAYER;
1212 if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1213 *mode = nir_var_shader_in;
1214 else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1215 *mode = nir_var_shader_out;
1216 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1217 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1218 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1219 *mode = nir_var_shader_out;
1220 else
1221 vtn_fail("invalid stage for SpvBuiltInLayer");
1222 break;
1223 case SpvBuiltInViewportIndex:
1224 *location = VARYING_SLOT_VIEWPORT;
1225 if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
1226 *mode = nir_var_shader_out;
1227 else if (b->options && b->options->caps.shader_viewport_index_layer &&
1228 (b->shader->info.stage == MESA_SHADER_VERTEX ||
1229 b->shader->info.stage == MESA_SHADER_TESS_EVAL))
1230 *mode = nir_var_shader_out;
1231 else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
1232 *mode = nir_var_shader_in;
1233 else
1234 vtn_fail("invalid stage for SpvBuiltInViewportIndex");
1235 break;
1236 case SpvBuiltInTessLevelOuter:
1237 *location = VARYING_SLOT_TESS_LEVEL_OUTER;
1238 break;
1239 case SpvBuiltInTessLevelInner:
1240 *location = VARYING_SLOT_TESS_LEVEL_INNER;
1241 break;
1242 case SpvBuiltInTessCoord:
1243 *location = SYSTEM_VALUE_TESS_COORD;
1244 set_mode_system_value(b, mode);
1245 break;
1246 case SpvBuiltInPatchVertices:
1247 *location = SYSTEM_VALUE_VERTICES_IN;
1248 set_mode_system_value(b, mode);
1249 break;
1250 case SpvBuiltInFragCoord:
1251 *location = VARYING_SLOT_POS;
1252 vtn_assert(*mode == nir_var_shader_in);
1253 break;
1254 case SpvBuiltInPointCoord:
1255 *location = VARYING_SLOT_PNTC;
1256 vtn_assert(*mode == nir_var_shader_in);
1257 break;
1258 case SpvBuiltInFrontFacing:
1259 *location = SYSTEM_VALUE_FRONT_FACE;
1260 set_mode_system_value(b, mode);
1261 break;
1262 case SpvBuiltInSampleId:
1263 *location = SYSTEM_VALUE_SAMPLE_ID;
1264 set_mode_system_value(b, mode);
1265 break;
1266 case SpvBuiltInSamplePosition:
1267 *location = SYSTEM_VALUE_SAMPLE_POS;
1268 set_mode_system_value(b, mode);
1269 break;
1270 case SpvBuiltInSampleMask:
1271 if (*mode == nir_var_shader_out) {
1272 *location = FRAG_RESULT_SAMPLE_MASK;
1273 } else {
1274 *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
1275 set_mode_system_value(b, mode);
1276 }
1277 break;
1278 case SpvBuiltInFragDepth:
1279 *location = FRAG_RESULT_DEPTH;
1280 vtn_assert(*mode == nir_var_shader_out);
1281 break;
1282 case SpvBuiltInHelperInvocation:
1283 *location = SYSTEM_VALUE_HELPER_INVOCATION;
1284 set_mode_system_value(b, mode);
1285 break;
1286 case SpvBuiltInNumWorkgroups:
1287 *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
1288 set_mode_system_value(b, mode);
1289 break;
1290 case SpvBuiltInWorkgroupSize:
1291 *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
1292 set_mode_system_value(b, mode);
1293 break;
1294 case SpvBuiltInWorkgroupId:
1295 *location = SYSTEM_VALUE_WORK_GROUP_ID;
1296 set_mode_system_value(b, mode);
1297 break;
1298 case SpvBuiltInLocalInvocationId:
1299 *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
1300 set_mode_system_value(b, mode);
1301 break;
1302 case SpvBuiltInLocalInvocationIndex:
1303 *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
1304 set_mode_system_value(b, mode);
1305 break;
1306 case SpvBuiltInGlobalInvocationId:
1307 *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
1308 set_mode_system_value(b, mode);
1309 break;
1310 case SpvBuiltInBaseVertex:
1311 /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
1312 * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
1313 */
1314 *location = SYSTEM_VALUE_FIRST_VERTEX;
1315 set_mode_system_value(b, mode);
1316 break;
1317 case SpvBuiltInBaseInstance:
1318 *location = SYSTEM_VALUE_BASE_INSTANCE;
1319 set_mode_system_value(b, mode);
1320 break;
1321 case SpvBuiltInDrawIndex:
1322 *location = SYSTEM_VALUE_DRAW_ID;
1323 set_mode_system_value(b, mode);
1324 break;
1325 case SpvBuiltInSubgroupSize:
1326 *location = SYSTEM_VALUE_SUBGROUP_SIZE;
1327 set_mode_system_value(b, mode);
1328 break;
1329 case SpvBuiltInSubgroupId:
1330 *location = SYSTEM_VALUE_SUBGROUP_ID;
1331 set_mode_system_value(b, mode);
1332 break;
1333 case SpvBuiltInSubgroupLocalInvocationId:
1334 *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
1335 set_mode_system_value(b, mode);
1336 break;
1337 case SpvBuiltInNumSubgroups:
1338 *location = SYSTEM_VALUE_NUM_SUBGROUPS;
1339 set_mode_system_value(b, mode);
1340 break;
1341 case SpvBuiltInDeviceIndex:
1342 *location = SYSTEM_VALUE_DEVICE_INDEX;
1343 set_mode_system_value(b, mode);
1344 break;
1345 case SpvBuiltInViewIndex:
1346 *location = SYSTEM_VALUE_VIEW_INDEX;
1347 set_mode_system_value(b, mode);
1348 break;
1349 case SpvBuiltInSubgroupEqMask:
1350       *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
1351 set_mode_system_value(b, mode);
1352 break;
1353 case SpvBuiltInSubgroupGeMask:
1354       *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
1355 set_mode_system_value(b, mode);
1356 break;
1357 case SpvBuiltInSubgroupGtMask:
1358       *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
1359 set_mode_system_value(b, mode);
1360 break;
1361 case SpvBuiltInSubgroupLeMask:
1362       *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
1363 set_mode_system_value(b, mode);
1364 break;
1365 case SpvBuiltInSubgroupLtMask:
1366       *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
1367 set_mode_system_value(b, mode);
1368 break;
1369 case SpvBuiltInFragStencilRefEXT:
1370 *location = FRAG_RESULT_STENCIL;
1371 vtn_assert(*mode == nir_var_shader_out);
1372 break;
1373 case SpvBuiltInWorkDim:
1374 *location = SYSTEM_VALUE_WORK_DIM;
1375 set_mode_system_value(b, mode);
1376 break;
1377 case SpvBuiltInGlobalSize:
1378 *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
1379 set_mode_system_value(b, mode);
1380 break;
1381 default:
1382 vtn_fail("unsupported builtin: %u", builtin);
1383 }
1384 }
1385
1386 static void
1387 apply_var_decoration(struct vtn_builder *b,
1388 struct nir_variable_data *var_data,
1389 const struct vtn_decoration *dec)
1390 {
1391 switch (dec->decoration) {
1392 case SpvDecorationRelaxedPrecision:
1393 break; /* FIXME: Do nothing with this for now. */
1394 case SpvDecorationNoPerspective:
1395 var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
1396 break;
1397 case SpvDecorationFlat:
1398 var_data->interpolation = INTERP_MODE_FLAT;
1399 break;
1400 case SpvDecorationCentroid:
1401 var_data->centroid = true;
1402 break;
1403 case SpvDecorationSample:
1404 var_data->sample = true;
1405 break;
1406 case SpvDecorationInvariant:
1407 var_data->invariant = true;
1408 break;
1409 case SpvDecorationConstant:
1410 var_data->read_only = true;
1411 break;
1412 case SpvDecorationNonReadable:
1413 var_data->image.access |= ACCESS_NON_READABLE;
1414 break;
1415 case SpvDecorationNonWritable:
1416 var_data->read_only = true;
1417 var_data->image.access |= ACCESS_NON_WRITEABLE;
1418 break;
1419 case SpvDecorationRestrict:
1420 var_data->image.access |= ACCESS_RESTRICT;
1421 break;
1422 case SpvDecorationVolatile:
1423 var_data->image.access |= ACCESS_VOLATILE;
1424 break;
1425 case SpvDecorationCoherent:
1426 var_data->image.access |= ACCESS_COHERENT;
1427 break;
1428 case SpvDecorationComponent:
1429 var_data->location_frac = dec->literals[0];
1430 break;
1431 case SpvDecorationIndex:
1432 var_data->index = dec->literals[0];
1433 break;
1434 case SpvDecorationBuiltIn: {
1435 SpvBuiltIn builtin = dec->literals[0];
1436
1437 nir_variable_mode mode = var_data->mode;
1438 vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
1439 var_data->mode = mode;
1440
1441 switch (builtin) {
1442 case SpvBuiltInTessLevelOuter:
1443 case SpvBuiltInTessLevelInner:
1444 var_data->compact = true;
1445 break;
1446 case SpvBuiltInFragCoord:
1447 var_data->pixel_center_integer = b->pixel_center_integer;
1448 /* fallthrough */
1449 case SpvBuiltInSamplePosition:
1450 var_data->origin_upper_left = b->origin_upper_left;
1451 break;
1452 default:
1453 break;
1454 }
1455 }
1456
1457 case SpvDecorationSpecId:
1458 case SpvDecorationRowMajor:
1459 case SpvDecorationColMajor:
1460 case SpvDecorationMatrixStride:
1461 case SpvDecorationAliased:
1462 case SpvDecorationUniform:
1463 case SpvDecorationLinkageAttributes:
1464 break; /* Do nothing with these here */
1465
1466 case SpvDecorationPatch:
1467 var_data->patch = true;
1468 break;
1469
1470 case SpvDecorationLocation:
1471 vtn_fail("Handled above");
1472
1473 case SpvDecorationBlock:
1474 case SpvDecorationBufferBlock:
1475 case SpvDecorationArrayStride:
1476 case SpvDecorationGLSLShared:
1477 case SpvDecorationGLSLPacked:
1478 break; /* These can apply to a type but we don't care about them */
1479
1480 case SpvDecorationBinding:
1481 case SpvDecorationDescriptorSet:
1482 case SpvDecorationNoContraction:
1483 case SpvDecorationInputAttachmentIndex:
1484 vtn_warn("Decoration not allowed for variable or structure member: %s",
1485 spirv_decoration_to_string(dec->decoration));
1486 break;
1487
1488 case SpvDecorationXfbBuffer:
1489 var_data->explicit_xfb_buffer = true;
1490 var_data->xfb_buffer = dec->literals[0];
1491 var_data->always_active_io = true;
1492 break;
1493 case SpvDecorationXfbStride:
1494 var_data->explicit_xfb_stride = true;
1495 var_data->xfb_stride = dec->literals[0];
1496 break;
1497 case SpvDecorationOffset:
1498 var_data->explicit_offset = true;
1499 var_data->offset = dec->literals[0];
1500 break;
1501
1502 case SpvDecorationStream:
1503 var_data->stream = dec->literals[0];
1504 break;
1505
1506 case SpvDecorationCPacked:
1507 case SpvDecorationSaturatedConversion:
1508 case SpvDecorationFuncParamAttr:
1509 case SpvDecorationFPRoundingMode:
1510 case SpvDecorationFPFastMathMode:
1511 case SpvDecorationAlignment:
1512 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1513 spirv_decoration_to_string(dec->decoration));
1514 break;
1515
1516 case SpvDecorationHlslSemanticGOOGLE:
1517 /* HLSL semantic decorations can safely be ignored by the driver. */
1518 break;
1519
1520 default:
1521 vtn_fail("Unhandled decoration");
1522 }
1523 }
1524
1525 static void
1526 var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1527 const struct vtn_decoration *dec, void *out_is_patch)
1528 {
1529 if (dec->decoration == SpvDecorationPatch) {
1530 *((bool *) out_is_patch) = true;
1531 }
1532 }
1533
1534 static void
1535 var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
1536 const struct vtn_decoration *dec, void *void_var)
1537 {
1538 struct vtn_variable *vtn_var = void_var;
1539
1540 /* Handle decorations that apply to a vtn_variable as a whole */
1541 switch (dec->decoration) {
1542 case SpvDecorationBinding:
1543 vtn_var->binding = dec->literals[0];
1544 vtn_var->explicit_binding = true;
1545 return;
1546 case SpvDecorationDescriptorSet:
1547 vtn_var->descriptor_set = dec->literals[0];
1548 return;
1549 case SpvDecorationInputAttachmentIndex:
1550 vtn_var->input_attachment_index = dec->literals[0];
1551 return;
1552 case SpvDecorationPatch:
1553 vtn_var->patch = true;
1554 break;
1555 case SpvDecorationOffset:
1556 vtn_var->offset = dec->literals[0];
1557 break;
1558 case SpvDecorationNonWritable:
1559 vtn_var->access |= ACCESS_NON_WRITEABLE;
1560 break;
1561 case SpvDecorationNonReadable:
1562 vtn_var->access |= ACCESS_NON_READABLE;
1563 break;
1564 case SpvDecorationVolatile:
1565 vtn_var->access |= ACCESS_VOLATILE;
1566 break;
1567 case SpvDecorationCoherent:
1568 vtn_var->access |= ACCESS_COHERENT;
1569 break;
1570 case SpvDecorationHlslCounterBufferGOOGLE:
1571 /* HLSL semantic decorations can safely be ignored by the driver. */
1572 break;
1573 default:
1574 break;
1575 }
1576
1577 if (val->value_type == vtn_value_type_pointer) {
1578 assert(val->pointer->var == void_var);
1579 assert(member == -1);
1580 } else {
1581 assert(val->value_type == vtn_value_type_type);
1582 }
1583
1584 /* Location is odd. If applied to a split structure, we have to walk the
1585 * whole thing and accumulate the location. It's easier to handle as a
1586 * special case.
1587 */
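   /* For example, a split output struct with two vec4 members decorated
    * Location 3 gets locations 3 and 4: each member advances the running
    * location by glsl_count_attribute_slots() of its type.
    */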
1588 if (dec->decoration == SpvDecorationLocation) {
1589 unsigned location = dec->literals[0];
1590 bool is_vertex_input = false;
1591 if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
1592 vtn_var->mode == vtn_variable_mode_output) {
1593 location += FRAG_RESULT_DATA0;
1594 } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
1595 vtn_var->mode == vtn_variable_mode_input) {
1596 is_vertex_input = true;
1597 location += VERT_ATTRIB_GENERIC0;
1598 } else if (vtn_var->mode == vtn_variable_mode_input ||
1599 vtn_var->mode == vtn_variable_mode_output) {
1600 location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
1601 } else if (vtn_var->mode != vtn_variable_mode_uniform) {
1602 vtn_warn("Location must be on input, output, uniform, sampler or "
1603 "image variable");
1604 return;
1605 }
1606
1607 if (vtn_var->var->num_members == 0) {
1608 /* This handles the member and lone variable cases */
1609 vtn_var->var->data.location = location;
1610 } else {
1611 /* This handles the structure member case */
1612 assert(vtn_var->var->members);
1613 for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
1614 vtn_var->var->members[i].location = location;
1615 const struct glsl_type *member_type =
1616 glsl_get_struct_field(vtn_var->var->interface_type, i);
1617 location += glsl_count_attribute_slots(member_type,
1618 is_vertex_input);
1619 }
1620 }
1621 return;
1622 } else {
1623 if (vtn_var->var) {
1624 if (vtn_var->var->num_members == 0) {
1625 assert(member == -1);
1626 apply_var_decoration(b, &vtn_var->var->data, dec);
1627 } else if (member >= 0) {
1628 /* Member decorations must come from a type */
1629 assert(val->value_type == vtn_value_type_type);
1630 apply_var_decoration(b, &vtn_var->var->members[member], dec);
1631 } else {
1632 unsigned length =
1633 glsl_get_length(glsl_without_array(vtn_var->type->type));
1634 for (unsigned i = 0; i < length; i++)
1635 apply_var_decoration(b, &vtn_var->var->members[i], dec);
1636 }
1637 } else {
1638 /* A few variables, those with external storage, have no actual
1639 * nir_variables associated with them. Fortunately, all decorations
1640 * we care about for those variables are on the type only.
1641 */
1642 vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
1643 vtn_var->mode == vtn_variable_mode_ssbo ||
1644 vtn_var->mode == vtn_variable_mode_push_constant ||
1645 (vtn_var->mode == vtn_variable_mode_workgroup &&
1646 b->options->lower_workgroup_access_to_offsets));
1647 }
1648 }
1649 }
1650
1651 static enum vtn_variable_mode
1652 vtn_storage_class_to_mode(struct vtn_builder *b,
1653 SpvStorageClass class,
1654 struct vtn_type *interface_type,
1655 nir_variable_mode *nir_mode_out)
1656 {
1657 enum vtn_variable_mode mode;
1658 nir_variable_mode nir_mode;
1659 switch (class) {
1660 case SpvStorageClassUniform:
1661 if (interface_type->block) {
1662 mode = vtn_variable_mode_ubo;
1663 nir_mode = nir_var_ubo;
1664 } else if (interface_type->buffer_block) {
1665 mode = vtn_variable_mode_ssbo;
1666 nir_mode = nir_var_ssbo;
1667 } else {
1668 /* Default-block uniforms, coming from gl_spirv */
1669 mode = vtn_variable_mode_uniform;
1670 nir_mode = nir_var_uniform;
1671 }
1672 break;
1673 case SpvStorageClassStorageBuffer:
1674 mode = vtn_variable_mode_ssbo;
1675 nir_mode = nir_var_ssbo;
1676 break;
1677 case SpvStorageClassUniformConstant:
1678 mode = vtn_variable_mode_uniform;
1679 nir_mode = nir_var_uniform;
1680 break;
1681 case SpvStorageClassPushConstant:
1682 mode = vtn_variable_mode_push_constant;
1683 nir_mode = nir_var_uniform;
1684 break;
1685 case SpvStorageClassInput:
1686 mode = vtn_variable_mode_input;
1687 nir_mode = nir_var_shader_in;
1688 break;
1689 case SpvStorageClassOutput:
1690 mode = vtn_variable_mode_output;
1691 nir_mode = nir_var_shader_out;
1692 break;
1693 case SpvStorageClassPrivate:
1694 mode = vtn_variable_mode_private;
1695 nir_mode = nir_var_private;
1696 break;
1697 case SpvStorageClassFunction:
1698 mode = vtn_variable_mode_function;
1699 nir_mode = nir_var_function;
1700 break;
1701 case SpvStorageClassWorkgroup:
1702 mode = vtn_variable_mode_workgroup;
1703 nir_mode = nir_var_shared;
1704 break;
1705 case SpvStorageClassAtomicCounter:
1706 mode = vtn_variable_mode_uniform;
1707 nir_mode = nir_var_uniform;
1708 break;
1709 case SpvStorageClassCrossWorkgroup:
1710 case SpvStorageClassGeneric:
1711 default:
1712 vtn_fail("Unhandled variable storage class");
1713 }
1714
1715 if (nir_mode_out)
1716 *nir_mode_out = nir_mode;
1717
1718 return mode;
1719 }
1720
1721 nir_ssa_def *
1722 vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
1723 {
1724 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1725 /* This pointer needs to have a pointer type with actual storage */
1726 vtn_assert(ptr->ptr_type);
1727 vtn_assert(ptr->ptr_type->type);
1728
1729 if (!ptr->offset) {
1730 /* If we don't have an offset then we must be a pointer to the variable
1731 * itself.
1732 */
1733 vtn_assert(!ptr->offset && !ptr->block_index);
1734
1735 struct vtn_access_chain chain = {
1736 .length = 0,
1737 };
1738 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
1739 }
1740
1741 vtn_assert(ptr->offset);
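/* UBO and SSBO pointers get packed as a vec2 of (block_index, offset);
* offset-based workgroup pointers are just the scalar offset.
*/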
1742 if (ptr->block_index) {
1743 vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
1744 ptr->mode == vtn_variable_mode_ssbo);
1745 return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
1746 } else {
1747 vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
1748 return ptr->offset;
1749 }
1750 } else {
1751 if (vtn_pointer_is_external_block(b, ptr) &&
1752 vtn_type_contains_block(b, ptr->type)) {
1753 const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
1754 const unsigned num_components =
1755 glsl_get_vector_elements(ptr->ptr_type->type);
1756
1757 /* In this case, we're looking for a block index and not an actual
1758 * deref.
1759 */
1760 if (!ptr->block_index) {
1761 /* If we don't have a block_index then we must be a pointer to the
1762 * variable itself.
1763 */
1764 vtn_assert(!ptr->deref);
1765
1766 struct vtn_access_chain chain = {
1767 .length = 0,
1768 };
1769 ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
1770 }
1771
1772 /* A block index is just a 32-bit value but the pointer has some
1773 * other dimensionality. Cram it in there and we'll unpack it later
1774 * in vtn_pointer_from_ssa.
1775 */
1776 const unsigned swiz[4] = { 0, };
1777 return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
1778 swiz, num_components, false);
1779 } else {
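/* For everything else the pointer is simply the SSA destination of the
* NIR deref chain built by vtn_pointer_to_deref.
*/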
1780 return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
1781 }
1782 }
1783 }
1784
1785 struct vtn_pointer *
1786 vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
1787 struct vtn_type *ptr_type)
1788 {
1789 vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
1790 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1791
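/* Strip off any array dimensions so vtn_storage_class_to_mode can look at
* the block/buffer_block flags of the underlying interface type.
*/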
1792 struct vtn_type *interface_type = ptr_type->deref;
1793 while (interface_type->base_type == vtn_base_type_array)
1794 interface_type = interface_type->array_element;
1795
1796 struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
1797 nir_variable_mode nir_mode;
1798 ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
1799 interface_type, &nir_mode);
1800 ptr->type = ptr_type->deref;
1801 ptr->ptr_type = ptr_type;
1802
1803 /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
1804 * need to whack the mode because glslang creates a function parameter
1805 * with the Function storage class even though it's a pointer to a sampler.
1806 * If we don't do this, then NIR won't get rid of the deref_cast for us.
1807 */
1808 if (ptr->mode == vtn_variable_mode_function &&
1809 (ptr->type->base_type == vtn_base_type_sampler ||
1810 ptr->type->base_type == vtn_base_type_sampled_image)) {
1811 ptr->mode = vtn_variable_mode_uniform;
1812 nir_mode = nir_var_uniform;
1813 }
1814
1815 if (vtn_pointer_uses_ssa_offset(b, ptr)) {
1816 /* This pointer type needs to have actual storage */
1817 vtn_assert(ptr_type->type);
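/* Undo the packing done in vtn_pointer_to_ssa: UBO/SSBO pointers come in
* as a vec2 of (block_index, offset), other offset-based pointers as a
* single scalar offset.
*/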
1818 if (ptr->mode == vtn_variable_mode_ubo ||
1819 ptr->mode == vtn_variable_mode_ssbo) {
1820 vtn_assert(ssa->num_components == 2);
1821 ptr->block_index = nir_channel(&b->nb, ssa, 0);
1822 ptr->offset = nir_channel(&b->nb, ssa, 1);
1823 } else {
1824 vtn_assert(ssa->num_components == 1);
1825 ptr->block_index = NULL;
1826 ptr->offset = ssa;
1827 }
1828 } else {
1829 const struct glsl_type *deref_type = ptr_type->deref->type;
1830 if (!vtn_pointer_is_external_block(b, ptr)) {
1831 assert(ssa->bit_size == 32 && ssa->num_components == 1);
1832 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1833 glsl_get_bare_type(deref_type), 0);
1834 } else if (vtn_type_contains_block(b, ptr->type)) {
1835 /* This is a pointer to somewhere in an array of blocks, not a
1836 * pointer to somewhere inside the block. The block index was
1837 * squashed into a vector type in vtn_pointer_to_ssa, so just pick
1838 * off the first channel and cast it back to 32 bits.
1839 */
1840 ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
1841 } else {
1842 /* This is a pointer to something internal or a pointer inside a
1843 * block. It's just a regular cast.
1844 */
1845 ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
1846 ptr_type->deref->type,
1847 ptr_type->stride);
1848 ptr->deref->dest.ssa.num_components =
1849 glsl_get_vector_elements(ptr_type->type);
1850 ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
1851 }
1852 }
1853
1854 return ptr;
1855 }
1856
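/* Returns true if this input/output variable is arrayed per-vertex, i.e.
* tessellation and geometry inputs and tessellation control outputs.
*/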
1857 static bool
1858 is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
1859 {
1860 if (var->patch || !glsl_type_is_array(var->type->type))
1861 return false;
1862
1863 if (var->mode == vtn_variable_mode_input) {
1864 return stage == MESA_SHADER_TESS_CTRL ||
1865 stage == MESA_SHADER_TESS_EVAL ||
1866 stage == MESA_SHADER_GEOMETRY;
1867 }
1868
1869 if (var->mode == vtn_variable_mode_output)
1870 return stage == MESA_SHADER_TESS_CTRL;
1871
1872 return false;
1873 }
1874
1875 static void
1876 vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
1877 struct vtn_type *ptr_type, SpvStorageClass storage_class,
1878 nir_constant *initializer)
1879 {
1880 vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
1881 struct vtn_type *type = ptr_type->deref;
1882
1883 struct vtn_type *without_array = type;
1884 while (glsl_type_is_array(without_array->type))
1885 without_array = without_array->array_element;
1886
1887 enum vtn_variable_mode mode;
1888 nir_variable_mode nir_mode;
1889 mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);
1890
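/* Tally up resource usage on the shader based on the variable mode. */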
1891 switch (mode) {
1892 case vtn_variable_mode_ubo:
1893 /* There's no other way to get vtn_variable_mode_ubo */
1894 vtn_assert(without_array->block);
1895 b->shader->info.num_ubos++;
1896 break;
1897 case vtn_variable_mode_ssbo:
1898 if (storage_class == SpvStorageClassStorageBuffer &&
1899 !without_array->block) {
1900 if (b->variable_pointers) {
1901 vtn_fail("Variables in the StorageBuffer storage class must "
1902 "have a struct type with the Block decoration");
1903 } else {
1904 /* If the variable pointers capability is not present, it's still
1905 * malformed SPIR-V but we can parse it and do the right thing anyway.
1906 * Since some of the 8-bit storage tests have bugs in this area,
1907 * just make it a warning for now.
1908 */
1909 vtn_warn("Variables in the StorageBuffer storage class must "
1910 "have a struct type with the Block decoration");
1911 }
1912 }
1913 b->shader->info.num_ssbos++;
1914 break;
1915 case vtn_variable_mode_uniform:
1916 if (glsl_type_is_image(without_array->type))
1917 b->shader->info.num_images++;
1918 else if (glsl_type_is_sampler(without_array->type))
1919 b->shader->info.num_textures++;
1920 break;
1921 case vtn_variable_mode_push_constant:
1922 b->shader->num_uniforms = vtn_type_block_size(b, type);
1923 break;
1924 default:
1925 /* No tallying is needed */
1926 break;
1927 }
1928
1929 struct vtn_variable *var = rzalloc(b, struct vtn_variable);
1930 var->type = type;
1931 var->mode = mode;
1932
1933 vtn_assert(val->value_type == vtn_value_type_pointer);
1934 val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
1935
1936 switch (var->mode) {
1937 case vtn_variable_mode_function:
1938 case vtn_variable_mode_private:
1939 case vtn_variable_mode_uniform:
1940 /* For these, we create the variable normally */
1941 var->var = rzalloc(b->shader, nir_variable);
1942 var->var->name = ralloc_strdup(var->var, val->name);
1943
1944 if (storage_class == SpvStorageClassAtomicCounter) {
1945 /* We need to tweak the nir type here because at vtn_handle_type
1946 * time we don't have access to the storage class, which is what
1947 * tells us that this is really an atomic uint.
1948 */
1949 var->var->type = repair_atomic_type(var->type->type);
1950 } else {
1951 /* Private variables don't have any explicit layout but some layouts
1952 * may have leaked through due to type deduplication in the SPIR-V.
1953 */
1954 var->var->type = glsl_get_bare_type(var->type->type);
1955 }
1956 var->var->data.mode = nir_mode;
1957 var->var->data.location = -1;
1958 var->var->interface_type = NULL;
1959 break;
1960
1961 case vtn_variable_mode_workgroup:
1962 if (b->options->lower_workgroup_access_to_offsets) {
1963 var->shared_location = -1;
1964 } else {
1965 /* Create the variable normally */
1966 var->var = rzalloc(b->shader, nir_variable);
1967 var->var->name = ralloc_strdup(var->var, val->name);
1968 /* Workgroup variables don't have any explicit layout but some
1969 * layouts may have leaked through due to type deduplication in the
1970 * SPIR-V.
1971 */
1972 var->var->type = glsl_get_bare_type(var->type->type);
1973 var->var->data.mode = nir_var_shared;
1974 }
1975 break;
1976
1977 case vtn_variable_mode_input:
1978 case vtn_variable_mode_output: {
1979 /* In order to know whether or not we're a per-vertex inout, we need
1980 * the patch qualifier. This means walking the variable decorations
1981 * early before we actually create any variables. Not a big deal.
1982 *
1983 * GLSLang really likes to place decorations in the most interior
1984 * thing it possibly can. In particular, if you have a struct, it
1985 * will place the patch decorations on the struct members. This
1986 * should be handled by the variable splitting below just fine.
1987 *
1988 * If you have an array-of-struct, things get even weirder: it will
1989 * place the patch decorations on the struct even though it's inside
1990 * an array, and having some members be patch while others are not
1991 * makes no sense whatsoever. Since the only sensible thing is for
1992 * it to be all or nothing, we'll call the variable patch if any of
1993 * its members are declared patch.
1994 */
1995 var->patch = false;
1996 vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
1997 if (glsl_type_is_array(var->type->type) &&
1998 glsl_type_is_struct(without_array->type)) {
1999 vtn_foreach_decoration(b, vtn_value(b, without_array->id,
2000 vtn_value_type_type),
2001 var_is_patch_cb, &var->patch);
2002 }
2003
2004 /* For inputs and outputs, we immediately split structures. This
2005 * is for a couple of reasons. For one, builtins may all come in
2006 * a struct and we really want those split out into separate
2007 * variables. For another, interpolation qualifiers can be
2008 * applied to members of the top-level struct and we need to be
2009 * able to preserve that information.
2010 */
2011
2012 struct vtn_type *interface_type = var->type;
2013 if (is_per_vertex_inout(var, b->shader->info.stage)) {
2014 /* In Geometry shaders (and some tessellation), inputs come
2015 * in per-vertex arrays. However, some builtins come in
2016 * non-per-vertex, hence the need for the is_array check. In
2017 * any case, there are no non-builtin arrays allowed so this
2018 * check should be sufficient.
2019 */
2020 interface_type = var->type->array_element;
2021 }
2022
2023 var->var = rzalloc(b->shader, nir_variable);
2024 var->var->name = ralloc_strdup(var->var, val->name);
2025 /* In Vulkan, shader I/O variables don't have any explicit layout but
2026 * some layouts may have leaked through due to type deduplication in
2027 * the SPIR-V.
2028 */
2029 var->var->type = glsl_get_bare_type(var->type->type);
2030 var->var->interface_type = interface_type->type;
2031 var->var->data.mode = nir_mode;
2032 var->var->data.patch = var->patch;
2033
2034 if (glsl_type_is_struct(interface_type->type)) {
2035 /* It's a struct. Set it up as per-member. */
2036 var->var->num_members = glsl_get_length(interface_type->type);
2037 var->var->members = rzalloc_array(var->var, struct nir_variable_data,
2038 var->var->num_members);
2039
2040 for (unsigned i = 0; i < var->var->num_members; i++) {
2041 var->var->members[i].mode = nir_mode;
2042 var->var->members[i].patch = var->patch;
2043 }
2044 }
2045
2046 /* For inputs and outputs, we need to grab locations and builtin
2047 * information from the interface type.
2048 */
2049 vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
2050 vtn_value_type_type),
2051 var_decoration_cb, var);
2052 break;
2053 }
2054
2055 case vtn_variable_mode_ubo:
2056 case vtn_variable_mode_ssbo:
2057 case vtn_variable_mode_push_constant:
2058 /* These don't need actual variables. */
2059 break;
2060 }
2061
2062 if (initializer) {
2063 var->var->constant_initializer =
2064 nir_constant_clone(initializer, var->var);
2065 }
2066
2067 vtn_foreach_decoration(b, val, var_decoration_cb, var);
2068
2069 if (var->mode == vtn_variable_mode_uniform) {
2070 /* XXX: We still need the binding information in the nir_variable
2071 * for these. We should fix that.
2072 */
2073 var->var->data.binding = var->binding;
2074 var->var->data.explicit_binding = var->explicit_binding;
2075 var->var->data.descriptor_set = var->descriptor_set;
2076 var->var->data.index = var->input_attachment_index;
2077 var->var->data.offset = var->offset;
2078
2079 if (glsl_type_is_image(without_array->type))
2080 var->var->data.image.format = without_array->image_format;
2081 }
2082
2083 if (var->mode == vtn_variable_mode_function) {
2084 vtn_assert(var->var != NULL && var->var->members == NULL);
2085 nir_function_impl_add_variable(b->nb.impl, var->var);
2086 } else if (var->var) {
2087 nir_shader_add_variable(b->shader, var->var);
2088 } else {
2089 vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
2090 }
2091 }
2092
2093 static void
2094 vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
2095 struct vtn_type *dst_type,
2096 struct vtn_type *src_type)
2097 {
2098 if (dst_type->id == src_type->id)
2099 return;
2100
2101 if (vtn_types_compatible(b, dst_type, src_type)) {
2102 /* Early versions of GLSLang would re-emit types unnecessarily and you
2103 * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
2104 * mismatched source and destination types.
2105 *
2106 * https://github.com/KhronosGroup/glslang/issues/304
2107 * https://github.com/KhronosGroup/glslang/issues/307
2108 * https://bugs.freedesktop.org/show_bug.cgi?id=104338
2109 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2110 */
2111 vtn_warn("Source and destination types of %s do not have the same "
2112 "ID (but are compatible): %u vs %u",
2113 spirv_op_to_string(opcode), dst_type->id, src_type->id);
2114 return;
2115 }
2116
2117 vtn_fail("Source and destination types of %s do not match: %s vs. %s",
2118 spirv_op_to_string(opcode),
2119 glsl_get_type_name(dst_type->type),
2120 glsl_get_type_name(src_type->type));
2121 }
2122
2123 void
2124 vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
2125 const uint32_t *w, unsigned count)
2126 {
2127 switch (opcode) {
2128 case SpvOpUndef: {
2129 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
2130 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
2131 break;
2132 }
2133
2134 case SpvOpVariable: {
2135 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2136
2137 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
2138
2139 SpvStorageClass storage_class = w[3];
2140 nir_constant *initializer = NULL;
2141 if (count > 4)
2142 initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;
2143
2144 vtn_create_variable(b, val, ptr_type, storage_class, initializer);
2145 break;
2146 }
2147
2148 case SpvOpAccessChain:
2149 case SpvOpPtrAccessChain:
2150 case SpvOpInBoundsAccessChain: {
2151 struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
2152 chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);
2153
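/* Gather the chain indices: constant operands are recorded as literal
* values, anything else is kept as the SSA value id.
*/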
2154 unsigned idx = 0;
2155 for (int i = 4; i < count; i++) {
2156 struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
2157 if (link_val->value_type == vtn_value_type_constant) {
2158 chain->link[idx].mode = vtn_access_mode_literal;
2159 switch (glsl_get_bit_size(link_val->type->type)) {
2160 case 8:
2161 chain->link[idx].id = link_val->constant->values[0].i8[0];
2162 break;
2163 case 16:
2164 chain->link[idx].id = link_val->constant->values[0].i16[0];
2165 break;
2166 case 32:
2167 chain->link[idx].id = link_val->constant->values[0].i32[0];
2168 break;
2169 case 64:
2170 chain->link[idx].id = link_val->constant->values[0].i64[0];
2171 break;
2172 default:
2173 vtn_fail("Invalid bit size");
2174 }
2175 } else {
2176 chain->link[idx].mode = vtn_access_mode_id;
2177 chain->link[idx].id = w[i];
2178
2179 }
2180 idx++;
2181 }
2182
2183 struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
2184 struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
2185 if (base_val->value_type == vtn_value_type_sampled_image) {
2186 /* This is rather insane. SPIR-V allows you to use OpSampledImage
2187 * to combine an array of images with a single sampler to get an
2188 * array of sampled images that all share the same sampler.
2189 * Fortunately, this means that we can more-or-less ignore the
2190 * sampler when crawling the access chain, but it does leave us
2191 * with this rather awkward little special-case.
2192 */
2193 struct vtn_value *val =
2194 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
2195 val->sampled_image = ralloc(b, struct vtn_sampled_image);
2196 val->sampled_image->type = base_val->sampled_image->type;
2197 val->sampled_image->image =
2198 vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
2199 val->sampled_image->sampler = base_val->sampled_image->sampler;
2200 } else {
2201 vtn_assert(base_val->value_type == vtn_value_type_pointer);
2202 struct vtn_value *val =
2203 vtn_push_value(b, w[2], vtn_value_type_pointer);
2204 val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
2205 val->pointer->ptr_type = ptr_type;
2206 }
2207 break;
2208 }
2209
2210 case SpvOpCopyMemory: {
2211 struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
2212 struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);
2213
2214 vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);
2215
2216 vtn_variable_copy(b, dest->pointer, src->pointer);
2217 break;
2218 }
2219
2220 case SpvOpLoad: {
2221 struct vtn_type *res_type =
2222 vtn_value(b, w[1], vtn_value_type_type)->type;
2223 struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
2224 struct vtn_pointer *src = src_val->pointer;
2225
2226 vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
2227
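/* Loading an image or sampler just produces another pointer to the same
* object rather than an SSA value.
*/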
2228 if (glsl_type_is_image(res_type->type) ||
2229 glsl_type_is_sampler(res_type->type)) {
2230 vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
2231 return;
2232 }
2233
2234 vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
2235 break;
2236 }
2237
2238 case SpvOpStore: {
2239 struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
2240 struct vtn_pointer *dest = dest_val->pointer;
2241 struct vtn_value *src_val = vtn_untyped_value(b, w[2]);
2242
2243 /* OpStore requires us to actually have a storage type */
2244 vtn_fail_if(dest->type->type == NULL,
2245 "Invalid destination type for OpStore");
2246
2247 if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
2248 glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
2249 /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
2250 * would then store them to a local variable as bool. Work around
2251 * the issue by doing an implicit conversion.
2252 *
2253 * https://github.com/KhronosGroup/glslang/issues/170
2254 * https://bugs.freedesktop.org/show_bug.cgi?id=104424
2255 */
2256 vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
2257 "OpTypeBool. Doing an implicit conversion to work around "
2258 "the problem.");
2259 struct vtn_ssa_value *bool_ssa =
2260 vtn_create_ssa_value(b, dest->type->type);
2261 bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
2262 vtn_variable_store(b, bool_ssa, dest);
2263 break;
2264 }
2265
2266 vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
2267
2268 if (glsl_type_is_sampler(dest->type->type)) {
2269 vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
2270 "propagation to workaround the problem.");
2271 vtn_assert(dest->var->copy_prop_sampler == NULL);
2272 dest->var->copy_prop_sampler =
2273 vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
2274 break;
2275 }
2276
2277 struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
2278 vtn_variable_store(b, src, dest);
2279 break;
2280 }
2281
2282 case SpvOpArrayLength: {
2283 struct vtn_pointer *ptr =
2284 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2285
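/* w[4] is the literal member index of the runtime array whose length is
* being queried; derive the length from the get_buffer_size intrinsic
* emitted below.
*/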
2286 const uint32_t offset = ptr->var->type->offsets[w[4]];
2287 const uint32_t stride = ptr->var->type->members[w[4]]->stride;
2288
2289 if (!ptr->block_index) {
2290 struct vtn_access_chain chain = {
2291 .length = 0,
2292 };
2293 ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
2294 vtn_assert(ptr->block_index);
2295 }
2296
2297 nir_intrinsic_instr *instr =
2298 nir_intrinsic_instr_create(b->nb.shader,
2299 nir_intrinsic_get_buffer_size);
2300 instr->src[0] = nir_src_for_ssa(ptr->block_index);
2301 nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
2302 nir_builder_instr_insert(&b->nb, &instr->instr);
2303 nir_ssa_def *buf_size = &instr->dest.ssa;
2304
2305 /* array_length = max(buffer_size - offset, 0) / stride */
2306 nir_ssa_def *array_length =
2307 nir_idiv(&b->nb,
2308 nir_imax(&b->nb,
2309 nir_isub(&b->nb,
2310 buf_size,
2311 nir_imm_int(&b->nb, offset)),
2312 nir_imm_int(&b->nb, 0u)),
2313 nir_imm_int(&b->nb, stride));
2314
2315 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2316 val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
2317 val->ssa->def = array_length;
2318 break;
2319 }
2320
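/* OpCopyMemorySized is not handled here; it falls through to the generic
* failure path along with any other unhandled opcode.
*/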
2321 case SpvOpCopyMemorySized:
2322 default:
2323 vtn_fail("Unhandled opcode");
2324 }
2325 }