/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>

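/* This file handles SPIR-V variables, pointers, and access chains for the
 * SPIR-V to NIR translator (vtn): it resolves storage classes to NIR
 * variable modes, walks OpAccessChain links, and emits the loads, stores,
 * and copies for every supported variable mode.
 */
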
static void ptr_decoration_cb(struct vtn_builder *b,
                              struct vtn_value *val, int member,
                              const struct vtn_decoration *dec,
                              void *void_ptr);

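/* Pushes a pointer-typed SPIR-V result <id> and applies any decorations on
 * the value (e.g. NonUniformEXT) to the pointer's access qualifiers.
 */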
struct vtn_value *
vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = ptr;
   vtn_foreach_decoration(b, val, ptr_decoration_cb, ptr);
   return val;
}

static void
ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ssa)
{
   struct vtn_ssa_value *ssa = void_ssa;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ssa->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}

struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
             struct vtn_type *type, struct vtn_ssa_value *ssa)
{
   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_value_pointer(b, value_id,
                                   vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
      vtn_foreach_decoration(b, val, ssa_decoration_cb, val->ssa);
   }
   return val;
}

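/* Allocates an access chain with room for `length` links out of the
 * builder's ralloc context; struct vtn_access_chain already declares one
 * link, so only length - 1 extra links are tacked onto the allocation.
 */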
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}

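/* Returns true if variables in this mode are accessed as explicit
 * (block-index, byte-offset) pairs rather than through NIR deref chains.
 */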
static bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant ||
          (mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

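/* External blocks are backed by buffer-like storage that lives outside the
 * shader: UBOs, SSBOs, push constants, and (when lowered to offsets)
 * workgroup memory.
 */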
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

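/* Turns a single access-chain link into a NIR SSA value scaled by `stride`.
 * A literal link folds to an immediate: e.g. a literal link with id 2 and a
 * stride of 16 becomes nir_imm_intN_t(&b->nb, 32, bit_size). An SSA link is
 * converted to the requested bit size and then multiplied by the stride.
 */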
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}

static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}

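/* Emits a vulkan_resource_index intrinsic, which turns the variable's
 * (descriptor set, binding) pair plus an optional descriptor array index
 * into an index the backend can use to locate the UBO/SSBO.
 */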
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

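/* Emits a vulkan_resource_reindex intrinsic, stepping an existing descriptor
 * index by offset_index elements within the same binding's descriptor array.
 */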
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

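/* Emits a load_vulkan_descriptor intrinsic to fetch the actual descriptor
 * for a previously computed descriptor index.
 */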
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We are dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are used.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}

struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}

/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}

nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}

nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}

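/* For example, vtn_type_block_size on a struct containing a vec3 at offset 0
 * and a float at offset 12 returns 16: each field's end is its offset plus
 * its own block size, and the struct size is the maximum field end.
 */
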
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

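/* Recursively splits a block load or store into per-vector operations,
 * walking struct offsets and array strides down to vectors and scalars and
 * transposing row-major matrices along the way.
 */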
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}

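/* Loads an entire value through the offset-based path, picking the intrinsic
 * that matches the pointer's mode (UBO, SSBO, push constant, or shared).
 */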
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

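/* Maps a SPIR-V built-in to a NIR slot and, where required, rewrites the
 * variable mode; most built-ins become system values via
 * set_mode_system_value().
 */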
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}

static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}

static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}

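/* Translates a SPIR-V storage class into vtn and NIR variable modes.  The
 * interface_type is needed to tell apart uses of the pre-1.3 Uniform storage
 * class: Block means UBO, BufferBlock means SSBO, and anything else is a
 * default-block uniform.
 */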
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's an UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBufferEXT:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* Fall through. */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
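
/* Flattens a vtn_pointer into a plain NIR SSA value.  For offset-based
 * modes the encoding is positional: UBO and SSBO pointers become a vec2 of
 * (block_index, offset) and workgroup pointers a single scalar offset,
 * mirroring the asserts below.  Deref-based pointers simply return the SSA
 * destination of their deref instruction.
 */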
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBufferEXT storage class.  This assumption
          * appears to be correct according to the Vulkan spec because, in
          * the table "Shader Resource and Storage Class Correspondence",
          * only the Uniform storage class with BufferBlock or the
          * StorageBuffer storage class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
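
/* The inverse of vtn_pointer_to_ssa: rebuilds a structured vtn_pointer
 * from the flat SSA encoding, e.g. splitting a two-component UBO/SSBO
 * pointer back into its block_index and offset channels.
 */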
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBufferEXT storage class.  This assumption
          * appears to be correct according to the Vulkan spec because, in
          * the table "Shader Resource and Storage Class Correspondence",
          * only the Uniform storage class with BufferBlock or the
          * StorageBuffer storage class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
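
/* As a worked example of the spec rules quoted below: for block members
 * declared in the order (A, B, C) where only A carries Location 2 and B
 * consumes two slots, A keeps location 2, B is assigned 3, and C is
 * assigned 5.
 */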
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * input/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
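
/* Builds the vtn_variable (and, for most modes, the backing nir_variable)
 * for an OpVariable.  The per-mode differences below cover resource
 * tallying for UBOs/SSBOs/images, per-member splitting for shader I/O,
 * and modes like push constants that need no NIR variable at all.
 */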
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBufferEXT storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* Need to tweak the nir type here because at vtn_handle_type we
          * don't have access to storage_class, which is what tells us that
          * this is an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         /* Workgroup variables don't have any explicit layout but some
          * layouts may have leaked through due to type deduplication in the
          * SPIR-V.
          */
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_mem_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
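
/* Bit-casts a value to a possibly differently-sized vector type by
 * zero-padding the source, bit-casting to the destination bit size, and
 * trimming (or padding) to the destination component count.  For example,
 * a 3-component 32-bit value cast to a 64-bit type is padded to 4x32,
 * bit-cast to 2x64, and then trimmed to the requested width.
 */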
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
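
   /* OpVariable operands: w[1] is the result type, w[2] the result id,
    * w[3] the storage class, and w[4] an optional initializer constant.
    */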
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }
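
   /* For the access-chain opcodes, w[3] is the base pointer and
    * w[4]..w[count - 1] are the indices, hence the chain of length
    * count - 4 below.  The Ptr variants additionally index the base
    * pointer itself as if it were the first element of an array, which is
    * what ptr_as_array records.
    */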
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         access |= vtn_value_access(link_val);
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->image);
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         ptr->access |= access;
         vtn_push_value_pointer(b, w[2], ptr);
      }
      break;
   }
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value_pointer(b, w[2], src);
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to work around the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            dest->var->copy_prop_sampler =
               vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;
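
      /* For example, a 256-byte buffer holding a runtime array at offset 16
       * with a 4-byte stride reports max(256 - 16, 0) / 4 = 60 elements.
       */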
      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      struct vtn_ssa_value *ptr_ssa = vtn_ssa_value(b, w[3]);

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def,
                                           u_val->type->type);
      u_val->ssa->access |= ptr_ssa->access;
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
      ptr_val->pointer->access |= u_val->ssa->access;
      break;
   }
   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}