/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}
struct vtn_value *
vtn_push_pointer(struct vtn_builder *b, uint32_t value_id,
                 struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}
void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
               uint32_t dst_value_id)
{
   struct vtn_value *src = vtn_untyped_value(b, src_value_id);
   struct vtn_value *dst = vtn_push_value(b, dst_value_id, src->value_type);
   struct vtn_value src_copy = *src;

   vtn_fail_if(dst->type->id != src->type->id,
               "Result Type must equal Operand type");

   src_copy.name = dst->name;
   src_copy.decoration = dst->decoration;
   src_copy.type = dst->type;
   *dst = src_copy;

   if (dst->value_type == vtn_value_type_pointer)
      dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
}
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
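
/* Editor's illustrative note (assumption, not from the original source):
 * the size computation above relies on the struct declaration ending in a
 * one-element trailing array, e.g.
 *
 *    struct vtn_access_chain {
 *       uint32_t length;
 *       ...
 *       struct vtn_access_link link[1];
 *    };
 *
 * so for length == 4 it allocates sizeof(*chain) + 3 * sizeof(link[0]),
 * and the MAX2(length, 1) keeps a zero-length chain from underflowing the
 * subtraction.
 */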
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}
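
/* Editor's sketch of the two pointer representations this predicate picks
 * between (a summary of the code below, not authoritative): when it
 * returns true, a pointer is carried as an explicit (block_index, byte
 * offset) pair and memory access goes through offset-based intrinsics
 * such as load_ubo/load_ssbo (see vtn_ssa_offset_pointer_dereference);
 * when it returns false, pointers become NIR deref chains instead (see
 * vtn_nir_deref_pointer_dereference).
 */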
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
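
/* Worked example (editor's addition, illustrative only): for a literal
 * access-chain link with link.id == 3 against an array of stride 16, the
 * function folds to the immediate nir_imm_intN_t(&b->nb, 48, bit_size)
 * with no runtime multiply.  For an SSA link holding, say, a 64-bit index
 * when bit_size == 32, the index is first narrowed with nir_i2i and then
 * multiplied by 16 with nir_imul_imm.
 */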
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * missing the decoration.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
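
/* Illustrative example (editor's addition, hypothetical GLSL): given
 *
 *    layout(set = 0, binding = 0) uniform Block { vec4 v[8]; } blocks[4];
 *
 * an access like blocks[i].v[j] crosses the Block-decorated struct at
 * blocks[i]: the blocks[i] link is consumed above as descriptor indexing
 * (vulkan_resource_index/reindex), while .v[j] is handled by the deref
 * chain built after the load_vulkan_descriptor cast.
 */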
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
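
/* Editor's worked example (illustrative, hypothetical shader): with
 * lowering to offsets, a pointer to member 1 of
 * "uniform Block { vec4 a; float b; } u;" whose member offsets are
 * {0, 16} ends up as block_index = vulkan_resource_index(set, binding)
 * and offset = 0 + 16, i.e. a plain (descriptor index, byte offset) pair
 * rather than a deref chain.
 */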
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
/* Returns an atomic_uint type based on the original uint type.  The
 * returned type will be equivalent to the original one but will have an
 * atomic_uint type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
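
/* Worked example (editor's addition, illustrative): repairing uint[3][2]
 * recurses through both array levels and rebuilds the same shape with an
 * atomic_uint leaf, i.e. atomic_uint[3][2], preserving each level's
 * length and explicit stride.
 */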
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
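
/* Illustrative example (editor's addition): for a SPIR-V access chain
 * equivalent to "v[2]" where v is a vec4 variable, the NIR chain is
 * deref_var(v) -> deref_array(2).  Because the array deref's parent type
 * is a vector, the tail returned here is deref_var(v); the callers below
 * then turn the component index into nir_vector_extract/insert instead
 * of an array deref of a vector.
 */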
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                   dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
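
/* Editor's worked example (illustrative): for
 *
 *    struct { vec3 a; float b; }   with offsets = {0, 12}
 *
 * the struct case computes MAX2(0 + 12, 12 + 4) = 16 bytes, and a
 * 4-element array of that struct with stride 16 sizes to 64 bytes.
 */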
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
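
/* Editor's sketch of the boolean handling above (assumption drawn from
 * the data_bit_size logic): booleans live in memory as 32-bit words, so
 * a load conceptually does
 *
 *    word = load_ubo(...);          // 0 or nonzero 32-bit word
 *    b    = ine(word, imm(0));      // back to a proper NIR boolean
 *
 * converting the in-memory representation into an NIR boolean value.
 */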
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
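
/* Illustrative example (editor's addition): loading a row-major mat3
 * from an SSBO with column stride C takes the matrix branch above and
 * issues three vec3 loads at byte offsets 0, C, and 2*C into a
 * transposed temporary, then applies vtn_ssa_transpose() to recover
 * column-major order; a column-major matrix instead loads its real
 * columns directly with no transpose.
 */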
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = vtn_create_ssa_value(b, src->type->type);
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type->type);
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
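
/* Illustrative example (editor's addition): a fragment shader's FragCoord
 * input maps either to the system value SYSTEM_VALUE_FRAG_COORD (when
 * options->frag_coord_is_sysval is set, which also flips the variable to
 * nir_var_system_value) or to the varying slot VARYING_SLOT_POS, so
 * backends can choose which representation they consume.
 */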
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationAliased:
      var_data->access &= ~ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb.buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb.stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   }

   if (vtn_var->var) {
      if (vtn_var->var->num_members == 0) {
         /* We call this function on types as well as variables and not all
          * struct types get split so we can end up having stray member
          * decorations; just ignore them.
          */
         if (member == -1)
            apply_var_decoration(b, &vtn_var->var->data, dec);
      } else if (member >= 0) {
         /* Member decorations must come from a type */
         assert(val->value_type == vtn_value_type_type);
         apply_var_decoration(b, &vtn_var->var->members[member], dec);
      } else {
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++)
            apply_var_decoration(b, &vtn_var->var->members[i], dec);
      }
   } else {
      /* A few variables, those with external storage, have no actual
       * nir_variables associated with them.  Fortunately, all decorations
       * we care about for those variables are on the type only.
       */
      vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                 vtn_var->mode == vtn_variable_mode_ssbo ||
                 vtn_var->mode == vtn_variable_mode_push_constant);
   }
}
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's a UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBuffer:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         if (b->options->constant_as_global) {
            mode = vtn_variable_mode_cross_workgroup;
            nir_mode = nir_var_mem_global;
         } else {
            mode = vtn_variable_mode_ubo;
            nir_mode = nir_var_mem_ubo;
         }
      } else {
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

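/* Each pointer mode gets its address format from the driver-supplied
 * spirv_to_nir options; modes that are always lowered to derefs use the
 * logical address format.
 */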
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* Fall through. */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}

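/* Flatten a vtn_pointer into a NIR SSA value.  Offset-based pointers become
 * either a (block_index, offset) vec2 (UBO/SSBO) or a single offset
 * (workgroup); deref-based pointers become either a block index or the SSA
 * destination of the deref instruction itself.
 */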
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}

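/* Inverse of vtn_pointer_to_ssa: rebuild a vtn_pointer from the flattened
 * SSA representation, given the SPIR-V pointer type it is supposed to have.
 */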
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}

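/* Inputs and outputs in tessellation and geometry stages come in per-vertex
 * arrays; this helper checks whether a given I/O variable is one of those.
 */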
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}

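/* Walk the members of a split I/O struct and give every member that lacks an
 * explicit Location the slot immediately after the preceding member, per the
 * Vulkan rules quoted in the body below.
 */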
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       * of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       * location. Each remaining member is assigned the location after the
       * immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block. This code also supports
       * input/outputs that are just structs
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}

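/* Create the vtn_variable and, for most modes, the backing nir_variable for
 * an OpVariable.  This also tallies UBOs, SSBOs, images, and textures into
 * shader_info and applies all variable and pointer decorations.
 */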
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer, nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   val->pointer = rzalloc(b, struct vtn_pointer);
   val->pointer->mode = var->mode;
   val->pointer->type = var->type;
   val->pointer->ptr_type = ptr_type;
   val->pointer->var = var;
   val->pointer->access = var->type->access;

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* We need to tweak the nir type here because at vtn_handle_type
          * time we don't have access to the storage class, which is what
          * tells us that this is actually an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;

      break;

   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* Workgroup variables don't have any explicit layout but some
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_var_mem_shared;
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ssbo) {
      /* SSBOs and images are assumed to not alias in the Simple, GLSL and Vulkan memory models */
      var->var->data.access |= b->mem_model != SpvMemoryModelOpenCL ? ACCESS_RESTRICT : 0;
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   /* Propagate access flags from the OpVariable decorations. */
   val->pointer->access |= var->access;

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

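/* Check that the source and destination types of an OpLoad, OpStore, or
 * OpCopyMemory agree, warning instead of failing when the IDs differ but
 * the types are structurally compatible (an old glslang bug).
 */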
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}

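/* Resize a vector to exactly num_components: extra source components are
 * dropped and missing ones are filled with zero immediates.
 */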
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}

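/* Bit-cast between vector types whose total bit sizes need not match, as
 * needed for OpConvertPtrToU/OpConvertUToPtr.  For example, casting a single
 * 32-bit value to a 64-bit scalar first zero-pads it to a 2x32-bit vector
 * (align(1, 64/32) = 2 components), bit-casts that to 1x64 bits, and then
 * shrinks the result to the one requested component, which amounts to a
 * zero-extension.
 */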
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}

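/* Top-level handler for the variable- and pointer-related SPIR-V opcodes:
 * OpUndef, OpVariable, the access-chain family, OpCopyMemory, OpLoad,
 * OpStore, OpArrayLength, and the pointer/integer conversions.
 */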
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class,
                          const_initializer, var_initializer);
      break;
   }
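
   /* All four access-chain opcodes share a single path: each link is
    * recorded as either a literal constant or an SSA id, and the actual
    * walk happens later in vtn_pointer_dereference.
    */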
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         val->sampled_image->image =
            vtn_decorate_pointer(b, val, val->sampled_image->image);
         val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         ptr->access |= access;
         vtn_push_pointer(b, w[2], ptr);
      }
      break;
   }
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
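
   /* For OpLoad, images, samplers, and sampled images are not loaded as
    * values; the pointer itself is propagated instead.
    */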
   case SpvOpLoad: {
      struct vtn_type *res_type = vtn_get_type(b, w[1]);
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (res_type->base_type == vtn_base_type_image ||
          res_type->base_type == vtn_base_type_sampler) {
         vtn_push_pointer(b, w[2], src);
         return;
      } else if (res_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image = val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, src);
         return;
      }

      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeVisibleMask |
               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);

            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to work around the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            struct vtn_value *v = vtn_untyped_value(b, w[2]);
            if (v->value_type == vtn_value_type_sampled_image) {
               dest->var->copy_prop_sampler = v->sampled_image->sampler;
            } else {
               vtn_assert(v->value_type == vtn_value_type_pointer);
               dest->var->copy_prop_sampler = v->pointer;
            }
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);

      if (count > 3) {
         unsigned idx = 4;
         SpvMemoryAccessMask access = w[3];

         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerAvailableMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeAvailableMask |
               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }
      break;
   }
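
   /* OpArrayLength only works on the last member of a block, which must be
    * a runtime array; the length is derived from the buffer size reported
    * by the get_buffer_size intrinsic at run time.
    */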
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      vtn_push_nir_ssa(b, w[2], array_length);
      break;
   }
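
   /* The pointer/integer conversions are only defined for physical
    * pointers, i.e. pointer types that carry an actual NIR storage type.
    */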
   case SpvOpConvertPtrToU: {
      struct vtn_type *u_type = vtn_get_type(b, w[1]);
      struct vtn_type *ptr_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertPtrToU can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
      vtn_push_nir_ssa(b, w[2], u);
      break;
   }

   case SpvOpConvertUToPtr: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_type *u_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
      vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, ptr, ptr_type));
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}