/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ((ptr->mode == vtn_variable_mode_ubo ||
            ptr->mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      ssa = nir_imul_imm(&b->nb, ssa, stride);
      return ssa;
   }
}
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}

static const struct glsl_type *
vtn_ptr_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_ptr_type;
   case vtn_variable_mode_ssbo:
      return b->options->ssbo_ptr_type;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : vtn_ptr_type_for_mode(b, var->mode);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : vtn_ptr_type_for_mode(b, mode);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   const struct glsl_type *ptr_type = vtn_ptr_type_for_mode(b, mode);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer. Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type. Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */
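
      /* For illustration, a hedged sketch (hypothetical GLSL):
       *
       *    layout(set = 0, binding = 0) uniform Block { vec4 v[8]; } blocks[4];
       *
       * In an access chain for blocks[i].v[j], everything up to and
       * including blocks[i] sits outside the Block-decorated struct and so
       * selects a descriptor (vulkan_resource_index / vulkan_resource_reindex
       * below), while .v[j] comes after the block and is handled as an
       * ordinary deref below the descriptor load.
       */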

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block. This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are used.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index. Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index. Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride. Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying. We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer. Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
            idx++;
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block. This is an interesting corner in the SPIR-V
          * spec. One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer. However, the SPIR-V spec for
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor. Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
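
         /* A hedged, hypothetical example of the literal reading: with
          *
          *    layout(set = 0, binding = 0) buffer Block { float f; } blocks[4];
          *
          * an OpPtrAccessChain with Element = 2 applied to a pointer at
          * blocks[1] reindexes the descriptor to blocks[3]; it does not add
          * a byte offset of 2 * sizeof(Block) inside blocks[1].
          */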
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
/* Dereference the given base pointer by the access chain */
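/* Two pointer representations are in play: offset-based pointers carry a
 * block index plus a byte offset (used when the driver asks us to lower
 * UBO/SSBO or workgroup access to offsets), while everything else is
 * represented as a NIR deref chain. vtn_pointer_uses_ssa_offset() decides
 * which lowering applies.
 */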
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}
/* Returns an atomic_uint type based on the original uint type. The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
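/* For example (a sketch of the recursion): a uint[4][2] input comes back as
 * atomic_uint[4][2]; each array level is rebuilt with the same length and
 * explicit stride, and only the uint leaf is swapped for atomic_uint.
 */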
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref(&b->nb, deref);
      } else {
         nir_store_deref(&b->nb, deref, inout->def, ~0);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_type_is_struct(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
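/* For example (hypothetical SPIR-V): an OpAccessChain whose last index
 * selects component 2 of a vec4 shows up in NIR as an array deref whose
 * parent is vector-typed. This function returns that vector-typed parent
 * so callers can load/store the whole vector and then extract or insert
 * the single component.
 */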
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
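/* As a hypothetical std140 example: a block holding a vec3 at offset 0
 * followed by a float at offset 12 yields field ends of 12 and 16 below,
 * so the struct case reports a size of 16 bytes.
 */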
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
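   /* Concretely: data_bit_size below is forced to 32 for booleans, so a
    * bool occupies a full 32-bit word in external memory, and loads are
    * converted back to a boolean by the nir_ine against zero at the bottom
    * of this function.
    */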
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting. At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load. We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
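            /* E.g. (a sketch): one vec3 column of a row-major matrix with a
             * 16-byte stride turns into three scalar loads at offset + 0,
             * offset + 16, and offset + 32, recombined with nir_vec below.
             */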
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly. The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors. That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref(&b->nb, deref);
            } else {
               nir_store_deref(&b->nb, deref, (*inout)->def, ~0);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref);
            } else {
               vtn_local_store(b, *inout, deref);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way. By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex. Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin: %u", builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->literals[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->literals[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->literals[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->literals[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationHlslSemanticGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->literals[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationHlslCounterBufferGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd. If applied to a split structure, we have to walk the
    * whole thing and accumulate the location. It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them. Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBufferEXT:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the variable
          * itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client. This
          * assumes that there will never be an SSBO binding variable using the
          * PhysicalStorageBufferEXT storage class. This assumption appears to
          * be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to the
             * variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
1826 struct vtn_pointer
*
1827 vtn_pointer_from_ssa(struct vtn_builder
*b
, nir_ssa_def
*ssa
,
1828 struct vtn_type
*ptr_type
)
1830 vtn_assert(ptr_type
->base_type
== vtn_base_type_pointer
);
1832 struct vtn_type
*interface_type
= ptr_type
->deref
;
1833 while (interface_type
->base_type
== vtn_base_type_array
)
1834 interface_type
= interface_type
->array_element
;
1836 struct vtn_pointer
*ptr
= rzalloc(b
, struct vtn_pointer
);
1837 nir_variable_mode nir_mode
;
1838 ptr
->mode
= vtn_storage_class_to_mode(b
, ptr_type
->storage_class
,
1839 interface_type
, &nir_mode
);
1840 ptr
->type
= ptr_type
->deref
;
1841 ptr
->ptr_type
= ptr_type
;
1843 if (b
->wa_glslang_179
) {
1844 /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
1845 * need to whack the mode because it creates a function parameter with
1846 * the Function storage class even though it's a pointer to a sampler.
1847 * If we don't do this, then NIR won't get rid of the deref_cast for us.
1849 if (ptr
->mode
== vtn_variable_mode_function
&&
1850 (ptr
->type
->base_type
== vtn_base_type_sampler
||
1851 ptr
->type
->base_type
== vtn_base_type_sampled_image
)) {
1852 ptr
->mode
= vtn_variable_mode_uniform
;
1853 nir_mode
= nir_var_uniform
;
1857 if (vtn_pointer_uses_ssa_offset(b
, ptr
)) {
1858 /* This pointer type needs to have actual storage */
1859 vtn_assert(ptr_type
->type
);
1860 if (ptr
->mode
== vtn_variable_mode_ubo
||
1861 ptr
->mode
== vtn_variable_mode_ssbo
) {
1862 vtn_assert(ssa
->num_components
== 2);
1863 ptr
->block_index
= nir_channel(&b
->nb
, ssa
, 0);
1864 ptr
->offset
= nir_channel(&b
->nb
, ssa
, 1);
1866 vtn_assert(ssa
->num_components
== 1);
1867 ptr
->block_index
= NULL
;
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         assert(ssa->bit_size == 32 && ssa->num_components == 1);
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           glsl_get_bare_type(deref_type), 0);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be a SSBO binding variable using the
          * PhysicalStorageBufferEXT storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used for SSBO bindings.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
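
/* vtn_pointer_to_ssa() and vtn_pointer_from_ssa() are intended to be
 * inverses of one another.  For instance, when SPIR-V chooses between two
 * SSBO pointers with OpSelect, both operands can be lowered to their SSA
 * (block_index, offset) form, selected with a single NIR bcsel, and the
 * result rebuilt into a vtn_pointer here.
 */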

static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
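
/* Example: in a geometry shader, a GLSL input such as
 *
 *    in Block { vec4 color; } blocks[];
 *
 * is per-vertex: the outer array dimension selects the incoming vertex.
 * Patch-qualified tessellation I/O has no per-vertex dimension, which is
 * why var->patch short-circuits the check above.
 */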

static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location.  Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * inputs/outputs that are plain structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
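
/* Example: given a block where only the second member carries an explicit
 * location,
 *
 *    out Block { vec4 a; layout(location = 5) vec4 b; vec4 c; } blk;
 *
 * member a gets the variable's base location, b keeps 5, and c gets 6 (one
 * slot after b), exactly as the spec language quoted above prescribes.
 */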

static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBufferEXT storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* We need to tweak the NIR type here because at vtn_handle_type we
          * don't have access to storage_class, which is what tells us this
          * is actually an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = glsl_get_bare_type(var->type->type);
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         /* Workgroup variables don't have any explicit layout but some
          * layouts may have leaked through due to type deduplication in the
          * SPIR-V.
          */
         var->var->type = glsl_get_bare_type(var->type->type);
         var->var->data.mode = nir_var_mem_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = glsl_get_bare_type(var->type->type);
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
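
   /* Example of the GLSLang behavior described above (schematically): for a
    * tessellation control output such as
    *
    *    patch out struct S { float f; } s[2];
    *
    * the Patch decoration may land on the members of S rather than on the
    * variable itself, so the whole variable is treated as patch if any
    * member is declared patch.
    */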

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
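
/* Example: a shader compiled with an affected GLSLang might declare the same
 * float type twice under different ids and then OpStore a value of one id
 * through a pointer of the other.  The two types are structurally identical,
 * so we only warn and carry on.
 */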

static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
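
/* Example: shrinking (a, b, c, d) to 2 components yields (a, b); growing
 * (a, b) to 4 components yields (a, b, 0, 0).  Despite the name, this
 * helper handles both directions.
 */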

static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
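
/* Worked example: bitcasting a 32-bit uvec3 to a 64-bit scalar first pads
 * the source to four components so it re-packs cleanly into two 64-bit
 * values, then keeps only the first.  Going the other way, a single 64-bit
 * value cast to a 32-bit uvec3 splits into two components and is then
 * zero-padded to (lo, hi, 0).
 */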

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }
2319 struct vtn_access_chain
*chain
= vtn_access_chain_create(b
, count
- 4);
2320 chain
->ptr_as_array
= (opcode
== SpvOpPtrAccessChain
);
2323 for (int i
= 4; i
< count
; i
++) {
2324 struct vtn_value
*link_val
= vtn_untyped_value(b
, w
[i
]);
2325 if (link_val
->value_type
== vtn_value_type_constant
) {
2326 chain
->link
[idx
].mode
= vtn_access_mode_literal
;
2327 switch (glsl_get_bit_size(link_val
->type
->type
)) {
2329 chain
->link
[idx
].id
= link_val
->constant
->values
[0].i8
[0];
2332 chain
->link
[idx
].id
= link_val
->constant
->values
[0].i16
[0];
2335 chain
->link
[idx
].id
= link_val
->constant
->values
[0].i32
[0];
2338 chain
->link
[idx
].id
= link_val
->constant
->values
[0].i64
[0];
2341 vtn_fail("Invalid bit size");
2344 chain
->link
[idx
].mode
= vtn_access_mode_id
;
2345 chain
->link
[idx
].id
= w
[i
];
2351 struct vtn_type
*ptr_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
2352 struct vtn_value
*base_val
= vtn_untyped_value(b
, w
[3]);
2353 if (base_val
->value_type
== vtn_value_type_sampled_image
) {
2354 /* This is rather insane. SPIR-V allows you to use OpSampledImage
2355 * to combine an array of images with a single sampler to get an
2356 * array of sampled images that all share the same sampler.
2357 * Fortunately, this means that we can more-or-less ignore the
2358 * sampler when crawling the access chain, but it does leave us
2359 * with this rather awkward little special-case.
2361 struct vtn_value
*val
=
2362 vtn_push_value(b
, w
[2], vtn_value_type_sampled_image
);
2363 val
->sampled_image
= ralloc(b
, struct vtn_sampled_image
);
2364 val
->sampled_image
->type
= base_val
->sampled_image
->type
;
2365 val
->sampled_image
->image
=
2366 vtn_pointer_dereference(b
, base_val
->sampled_image
->image
, chain
);
2367 val
->sampled_image
->sampler
= base_val
->sampled_image
->sampler
;
2369 vtn_assert(base_val
->value_type
== vtn_value_type_pointer
);
2370 struct vtn_value
*val
=
2371 vtn_push_value(b
, w
[2], vtn_value_type_pointer
);
2372 val
->pointer
= vtn_pointer_dereference(b
, base_val
->pointer
, chain
);
2373 val
->pointer
->ptr_type
= ptr_type
;
2378 case SpvOpCopyMemory
: {
2379 struct vtn_value
*dest
= vtn_value(b
, w
[1], vtn_value_type_pointer
);
2380 struct vtn_value
*src
= vtn_value(b
, w
[2], vtn_value_type_pointer
);
2382 vtn_assert_types_equal(b
, opcode
, dest
->type
->deref
, src
->type
->deref
);
2384 vtn_variable_copy(b
, dest
->pointer
, src
->pointer
);

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to workaround the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            dest->var->copy_prop_sampler =
               vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }
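
   /* Example of the glslang#170 pattern handled above (schematically):
    *
    *    %v = OpLoad %uint %ubo_member_ptr
    *         OpStore %local_bool_ptr %v
    *
    * The nir_i2b conversion turns the uint into a proper Boolean before
    * the store.
    */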

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
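
   /* Worked example for the formula above: with a 100-byte buffer, a
    * runtime array at offset 16, and a 12-byte stride, the length is
    * max(100 - 16, 0) / 12 = 7 elements.
    */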

   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
      break;
   }

   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      break;
   }
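
   /* Example: for a 64-bit physical pointer, OpConvertPtrToU to a uvec2 of
    * 32-bit components splits the address via nir_sloppy_bitcast, and
    * OpConvertUToPtr reverses it; converting to a 64-bit scalar is a
    * straight bitcast.
    */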

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}