/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}
struct vtn_value *
vtn_push_pointer(struct vtn_builder *b, uint32_t value_id,
                 struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}
void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
               uint32_t dst_value_id)
{
   struct vtn_value *src = vtn_untyped_value(b, src_value_id);
   struct vtn_value *dst = vtn_untyped_value(b, dst_value_id);
   struct vtn_value src_copy = *src;

   vtn_fail_if(dst->value_type != vtn_value_type_invalid,
               "SPIR-V id %u has already been written by another instruction",
               dst_value_id);

   vtn_fail_if(dst->type->id != src->type->id,
               "Result Type must equal Operand type");

   src_copy.name = dst->name;
   src_copy.decoration = dst->decoration;
   src_copy.type = dst->type;
   *dst = src_copy;

   if (dst->value_type == vtn_value_type_pointer)
      dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
}
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
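/* A worked example of the sizing math in vtn_access_chain_create() (an
 * illustration, assuming the usual flexible-array layout of
 * struct vtn_access_chain): vtn_access_chain_create(b, 3) allocates
 *
 *    sizeof(*chain) + (3 - 1) * sizeof(chain->link[0])
 *
 * i.e. two extra links on top of the one already counted in sizeof(*chain),
 * leaving room for three usable chain->link[] entries.
 */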
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}
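/* A sketch of what the two addressing styles selected by this check look
 * like: when vtn_mode_uses_ssa_offset() returns true, a UBO/SSBO/push
 * constant pointer is carried as explicit SSA values (a block index plus a
 * byte offset, see vtn_pointer_to_offset() below) and accesses become
 * offset-based load_ubo/load_ssbo-style intrinsics; when it returns false,
 * the pointer is carried as a NIR deref chain instead.
 */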
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
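/* For example, a literal access link with id 3 and a stride of 16 folds to
 * nir_imm_int(&b->nb, 48), while an SSA link is first converted to bit_size
 * with nir_i2i() and then multiplied by the stride with nir_imul_imm().
 */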
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
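/* Taken together, the three helpers above lower a Vulkan buffer access in
 * stages.  For a hypothetical chain like
 *
 *    %p = OpAccessChain %ssbo_array_ptr %var %i %j
 *
 * the lowering is roughly: vulkan_resource_index(desc_set, binding, %i) to
 * pick the descriptor, vulkan_resource_reindex() for any further
 * descriptor-array levels, and load_vulkan_descriptor() to turn the final
 * index into something %j can address within the block.
 */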
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */
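
      /* As a hypothetical illustration: for
       *
       *    %p = OpAccessChain %base %i %j %k
       *
       * where %base points to an array of Block-decorated structs, %i is
       * descriptor indexing (it selects the block) while %j and %k are
       * buffer offset indexing inside the block; the Block-decorated struct
       * type is the crossover point.
       */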

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * messed up.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode,
                                  vtn_type_get_nir_type(b, type, base->mode),
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
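         /* Concretely (hypothetical SPIR-V): for
          *
          *    %p = OpPtrAccessChain %block_ptr %n ...
          *
          * the element index %n is folded into the descriptor index by the
          * vulkan_resource_reindex() call below, selecting the n-th block of
          * the array descriptor rather than a byte offset within one block.
          */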
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the beginning of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the beginning of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
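/* For example, for a per-component access like v[i] on a vector-typed
 * variable, the deref chain ends in an array deref whose parent has a
 * vector type; get_deref_tail() returns the parent so the callers below can
 * load or store the vector as a whole and then pick out the component with
 * nir_vector_extract()/nir_vector_insert().
 */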
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                   dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
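/* A worked example of vtn_type_block_size(), assuming std140-style offsets
 * and strides supplied in the SPIR-V: for
 *
 *    struct { vec3 a; float b; mat4 c; }   with offsets a=0, b=12, c=16
 *
 * the struct case takes the MAX2() over all field ends: a ends at
 * 0 + 3 * 4 = 12, b at 12 + 4 = 16, and c (stride 16, 4 columns) at
 * 16 + 4 * 16 = 80, so the computed block size is 80 bytes.
 */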
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
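/* Two quirks of _vtn_load_store_tail() worth spelling out: booleans are
 * loaded as 32-bit integers and converted back with nir_ine(def, 0), and
 * because relaxed layouts only guarantee scalar alignment, e.g. a vec4 of
 * 32-bit floats is tagged with align=4 rather than align=16.
 */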
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
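/* A sketch of the row-major matrix path in _vtn_block_load_store() above:
 * for a row_major mat4 with a 16-byte row stride, the loop emits four vec4
 * loads at byte offsets 0, 16, 32 and 48 (num_ops is the row count here),
 * and the result is put back into column-major order with
 * vtn_ssa_transpose().
 */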
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = vtn_create_ssa_value(b, src->type->type);
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   if (ptr->mode == vtn_variable_mode_uniform) {
      if (ptr->type->base_type == vtn_base_type_image ||
          ptr->type->base_type == vtn_base_type_sampler) {
         /* See also our handling of OpTypeSampler and OpTypeImage */
         vtn_assert(load);
         (*inout)->def = vtn_pointer_to_ssa(b, ptr);
         return;
      } else if (ptr->type->base_type == vtn_base_type_sampled_image) {
         /* See also our handling of OpTypeSampledImage */
         vtn_assert(load);
         struct vtn_sampled_image si = {
            .image = vtn_pointer_to_deref(b, ptr),
            .sampler = vtn_pointer_to_deref(b, ptr),
         };
         (*inout)->def = vtn_sampled_image_to_nir_ssa(b, si);
         return;
      }
   }

   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type->type);
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationAliased:
      var_data->access &= ~ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb.buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb.stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant);
      }
   }
}
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's a UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBuffer:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         if (b->options->constant_as_global) {
            mode = vtn_variable_mode_cross_workgroup;
            nir_mode = nir_var_mem_global;
         } else {
            mode = vtn_variable_mode_ubo;
            nir_mode = nir_var_mem_ubo;
         }
      } else {
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_atomic_counter;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* fall through */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_atomic_counter:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
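/* Which concrete format each mode uses is the driver's choice.  As one
 * illustrative possibility (an assumption, not a requirement of this file),
 * a driver that picks nir_address_format_32bit_index_offset for UBOs and
 * SSBOs represents those pointers as a 32-bit vec2 of
 * (block_index, byte_offset), which is exactly the shape
 * vtn_pointer_to_ssa() below produces for those modes.
 */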
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * pointer.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage
          * class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
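/* A minimal sketch of the SSA shapes produced above (assuming the
 * index/offset style address formats discussed earlier):
 *
 *    UBO/SSBO:   vec2(block_index, offset)
 *    Workgroup:  scalar offset
 *    otherwise:  the SSA destination of a NIR deref, or a bare block index
 *
 * vtn_pointer_from_ssa() below performs the inverse unpacking.
 */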
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type =
         vtn_type_get_nir_type(b, ptr_type->deref, ptr->mode);
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage
          * class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
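/* Taken together, the two helpers are intended to round-trip.  A sketch of
 * the invariant (illustrative, not a test that exists in this file):
 *
 *    nir_ssa_def *ssa = vtn_pointer_to_ssa(b, ptr);
 *    struct vtn_pointer *back = vtn_pointer_from_ssa(b, ssa, ptr->ptr_type);
 *    // "back" addresses the same memory as "ptr"
 */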
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
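/* For example, a tessellation control or geometry shader input declared in
 * GLSL as gl_in[] is per-vertex: its outermost array dimension indexes
 * vertices rather than data, so callers strip it off (see per_vertex_type
 * in vtn_create_variable below).
 */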
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * input/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
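/* A worked example (hypothetical block with single-slot members): with
 * base_location = 2 and member Location decorations {unset, 5, unset}, the
 * loop above assigns locations {2, 5, 6}; each undecorated member lands on
 * the slot right after its predecessor.
 */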
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer,
                    nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (without_array->base_type == vtn_base_type_image) {
         if (glsl_type_is_image(without_array->glsl_image))
            b->shader->info.num_images++;
         else if (glsl_type_is_sampler(without_array->glsl_image))
            b->shader->info.num_textures++;
      }
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }
   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->mode = mode;
   var->type = type;
   var->base_location = -1;

   val->pointer = rzalloc(b, struct vtn_pointer);
   val->pointer->mode = var->mode;
   val->pointer->type = var->type;
   val->pointer->ptr_type = ptr_type;
   val->pointer->var = var;
   val->pointer->access = var->type->access;

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_atomic_counter:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->interface_type = var->var->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;

      break;

   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* Workgroup variables don't have any explicit layout but some
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_var_mem_shared;
      break;
   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = vtn_type_get_nir_type(b, iface_type,
                                                          var->mode);

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ssbo) {
      /* SSBOs and images are assumed to not alias in the Simple, GLSL and
       * Vulkan memory models.
       */
      var->var->data.access |=
         b->mem_model != SpvMemoryModelOpenCL ? ACCESS_RESTRICT : 0;
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   /* Propagate access flags from the OpVariable decorations. */
   val->pointer->access |= var->access;

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(glsl_without_array(var->var->type)))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
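/* To make the flow above concrete: a typical Vulkan declaration such as
 *
 *    layout(set = 0, binding = 1) uniform UBO { vec4 color; } u;
 *
 * (illustrative GLSL, not from this file) arrives as an OpVariable in the
 * Uniform storage class whose struct type carries the Block decoration.
 * It is tallied in num_ubos, gets a nir_variable with interface_type set,
 * and its DescriptorSet/Binding decorations end up in
 * data.descriptor_set/data.binding via var_decoration_cb.
 */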
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
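/* Despite the name, this helper both shrinks and zero-pads.  For example,
 * resizing a 32-bit vec2 (a, b) to 4 components yields (a, b, 0, 0), while
 * resizing a vec3 (a, b, c) to 2 components yields (a, b).
 */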
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
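/* A worked example: casting a 32-bit scalar to a 64-bit scalar zero-pads
 * it to a 32-bit vec2 first (so 64 bits of storage exist), bit-casts that
 * to one 64-bit component, and returns it with the high 32 bits zero.
 * Conversely, a 64-bit scalar cast to a 32-bit scalar is split into a
 * 32-bit vec2 and then shrunk to its low component.
 */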
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class,
                          const_initializer, var_initializer);

      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         access |= vtn_value_access(link_val);
         idx++;
      }

      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_pointer *base =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      struct vtn_pointer *ptr = vtn_pointer_dereference(b, base, chain);
      ptr->ptr_type = ptr_type;
      ptr->access |= access;
      vtn_push_pointer(b, w[2], ptr);
      break;
   }
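   /* For reference, a chain such as (illustrative SPIR-V, not from this
    * file)
    *
    *    %p = OpAccessChain %ptr_f32 %ssbo_var %c0 %i
    *
    * becomes a two-link vtn_access_chain: link[0] holds the literal value
    * of the constant %c0 and link[1] holds the SSA index id %i, which
    * vtn_pointer_dereference folds into the base pointer.
    */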
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type = vtn_get_type(b, w[1]);
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeVisibleMask |
               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);

            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);

      if (count > 3) {
         unsigned idx = 4;
         SpvMemoryAccessMask access = w[3];

         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerAvailableMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeAvailableMask |
               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }
      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb, buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      vtn_push_nir_ssa(b, w[2], array_length);

      break;
   }
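   /* A quick arithmetic check of the formula above: a runtime array at
    * byte offset 16 with a 12-byte stride in a 256-byte buffer has
    * max(256 - 16, 0) / 12 = 240 / 12 = 20 elements.
    */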
   case SpvOpConvertPtrToU: {
      struct vtn_type *u_type = vtn_get_type(b, w[1]);
      struct vtn_type *ptr_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertPtrToU can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
      vtn_push_nir_ssa(b, w[2], u);
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_type *u_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
      vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, ptr, ptr_type));
      break;
   }
   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}