/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}
struct vtn_value *
vtn_push_pointer(struct vtn_builder *b, uint32_t value_id,
                 struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}
void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
               uint32_t dst_value_id)
{
   struct vtn_value *src = vtn_untyped_value(b, src_value_id);
   struct vtn_value *dst = vtn_untyped_value(b, dst_value_id);
   struct vtn_value src_copy = *src;

   vtn_fail_if(dst->value_type != vtn_value_type_invalid,
               "SPIR-V id %u has already been written by another instruction",
               dst_value_id);

   vtn_fail_if(dst->type->id != src->type->id,
               "Result Type must equal Operand type");

   src_copy.name = dst->name;
   src_copy.decoration = dst->decoration;
   src_copy.type = dst->type;
   *dst = src_copy;

   if (dst->value_type == vtn_value_type_pointer)
      dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
}
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
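/* Illustration (not part of the original file): struct vtn_access_chain
 * already contains a one-element link[] array, so the allocation above only
 * pays for the links beyond the first.  For example, a chain of length 3
 * allocates sizeof(*chain) + 2 * sizeof(chain->link[0]) bytes, and a
 * requested length of 0 is clamped by MAX2 so the built-in element is still
 * allocated.
 */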
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}
static bool
vtn_mode_is_cross_invocation(struct vtn_builder *b,
                             enum vtn_variable_mode mode)
{
   return mode == vtn_variable_mode_ssbo ||
          mode == vtn_variable_mode_ubo ||
          mode == vtn_variable_mode_phys_ssbo ||
          mode == vtn_variable_mode_push_constant ||
          mode == vtn_variable_mode_workgroup ||
          mode == vtn_variable_mode_cross_workgroup;
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
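/* Sketch of the two cases above (illustrative only): a literal access-chain
 * link, e.g. the constant 2 with a stride of 16, folds directly to
 * nir_imm_intN_t(&b->nb, 32, bit_size).  A dynamic link first gets an i2i
 * cast if its bit size differs from the requested one and is then scaled by
 * the stride with nir_imul_imm.
 */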
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
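/* Usage note (illustrative, not from the original file): for a variable
 * decorated DescriptorSet 0 / Binding 2, the intrinsic built above prints
 * roughly as
 *
 *    ssa_N = intrinsic vulkan_resource_index (ssa_idx) (desc_set=0,
 *            binding=2, desc_type=UBO)
 *
 * The result is an abstract resource index; the driver's lowering pass
 * later turns it into whatever its binding model requires.
 */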
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
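/* Taken together, the three helpers above implement the usual Vulkan
 * descriptor protocol: vulkan_resource_index produces an abstract index from
 * (set, binding, array index), vulkan_resource_reindex moves that index
 * within a descriptor array, and load_vulkan_descriptor turns the final
 * index into an actual UBO/SSBO pointer value that the deref chain below can
 * be based on.
 */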
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * misused.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode,
                                  vtn_type_get_nir_type(b, type, base->mode),
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
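/* Worked example (hypothetical SPIR-V, not from the original file): given
 * "uniform Block { S s; } blocks[4];" and an access chain blocks[i].s.field,
 * the descriptor phase above consumes the blocks[i] link as a descriptor
 * array index, loads the descriptor once the Block-decorated struct is
 * reached, and the final loop then emits ordinary struct/array derefs for
 * the remaining .s.field links.
 */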
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
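/* Example (illustrative): for an OpAccessChain that ends by indexing
 * component 2 of a vec4 v, the final deref is an array deref whose parent
 * type is a vector, so the "tail" returned here is the deref of v itself.
 * vtn_local_load/store below then handle the component selection with
 * nir_vector_extract/nir_vector_insert.
 */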
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                   dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
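/* Note on the read-modify-write above: a store to a single vector component
 * is emulated by loading the whole vector, inserting the new component with
 * nir_vector_insert, and storing the vector back.  As the comment in
 * _vtn_variable_load_store explains, this load+insert+store sequence is why
 * cross-invocation stores must not go through this path: two threads writing
 * different components of the same vector would race.
 */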
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
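/* Worked example (illustrative): for "struct { vec3 a; float b; }" with
 * offsets a=0 and b=12, the struct case above computes
 * MAX2(0 + 12, 12 + 4) = 16 bytes; an array of four such structs with
 * stride 16 then sizes to 16 * 4 = 64 bytes via the array case.
 */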
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
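/* Strategy note for the matrix path above: a column-major matrix is simply
 * num-columns vector loads/stores at stride type->stride.  A row-major
 * matrix is handled as its transpose, one vector per row at the row stride
 * taken from array_element, and then flipped back with vtn_ssa_transpose on
 * load (or pre-transposed on store).  A single column of a row-major matrix
 * therefore degenerates into the strided per-component case.
 */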
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src,
               enum gl_access_qualifier access)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = vtn_create_ssa_value(b, src->type->type);
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access | access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst, enum gl_access_qualifier access)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access | access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   if (ptr->mode == vtn_variable_mode_uniform) {
      if (ptr->type->base_type == vtn_base_type_image ||
          ptr->type->base_type == vtn_base_type_sampler) {
         /* See also our handling of OpTypeSampler and OpTypeImage */
         vtn_assert(load);
         (*inout)->def = vtn_pointer_to_ssa(b, ptr);
         return;
      } else if (ptr->type->base_type == vtn_base_type_sampled_image) {
         /* See also our handling of OpTypeSampledImage */
         vtn_assert(load);
         struct vtn_sampled_image si = {
            .image = vtn_pointer_to_deref(b, ptr),
            .sampler = vtn_pointer_to_deref(b, ptr),
         };
         (*inout)->def = vtn_sampled_image_to_nir_ssa(b, si);
         return;
      }
   }

   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_mode_is_cross_invocation(b, ptr->mode)) {
            /* If it's cross-invocation, we call nir_load/store_deref
             * directly.  The vtn_local_load/store helpers are too clever and
             * do magic to avoid array derefs of vectors.  That magic is both
             * less efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src,
                  enum gl_access_qualifier access)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src, access);
   } else {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type->type);
      _vtn_variable_load_store(b, true, src, src->access | access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest, enum gl_access_qualifier access)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest, access);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access | access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src, enum gl_access_qualifier dest_access,
                   enum gl_access_qualifier src_access)
{
   vtn_assert(glsl_get_bare_type(src->type->type) ==
              glsl_get_bare_type(dest->type->type));
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src, src_access), dest, dest_access);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem, dest_access, src_access);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src, enum gl_access_qualifier dest_access,
                  enum gl_access_qualifier src_access)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src, dest_access, src_access);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalOffset:
      *location = SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      if (b->options && b->options->view_index_is_input) {
         *location = VARYING_SLOT_VIEW_INDEX;
         vtn_assert(*mode == nir_var_shader_in);
      } else {
         *location = SYSTEM_VALUE_VIEW_INDEX;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationAliased:
      var_data->access &= ~ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb.buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb.stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant);
      }
   }
}
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's a UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBuffer:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         if (b->options->constant_as_global) {
            mode = vtn_variable_mode_cross_workgroup;
            nir_mode = nir_var_mem_global;
         } else {
            mode = vtn_variable_mode_ubo;
            nir_mode = nir_var_mem_ubo;
         }
      } else {
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_atomic_counter;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
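/* Example of the mapping above: a Vulkan-style
 *
 *    layout(set = 0, binding = 0) buffer Ssbo { ... };
 *
 * arrives as SpvStorageClassStorageBuffer and maps to
 * vtn_variable_mode_ssbo / nir_var_mem_ssbo, while the older (pre-1.3)
 * SPIR-V spelling of the same thing, a BufferBlock-decorated struct in
 * SpvStorageClassUniform, takes the interface_type->buffer_block path.
 */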
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* Fall through. */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_atomic_counter:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
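/* The formats returned above determine the SSA shape of pointers in each
 * mode.  For instance, a driver that sets ubo_addr_format to
 * nir_address_format_32bit_index_offset (one common choice, not a
 * requirement) gets UBO pointers as a vec2 of (block_index, byte_offset),
 * whereas nir_address_format_logical means the pointer can never be
 * lowered to SSA and must remain a deref chain.
 */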
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * pointer.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
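/* Summary of the SSA forms produced above:
 *
 *    UBO/SSBO with offsets:      vec2(block_index, offset)
 *    Workgroup with offsets:     scalar byte offset
 *    External block (by index):  scalar block_index
 *    Everything else:            the SSA def of the NIR deref
 */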
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type =
         vtn_type_get_nir_type(b, ptr_type->deref, ptr->mode);
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
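/* For example, a geometry shader's gl_in[] is per-vertex: one array
 * element per vertex of the input primitive.  A tessellation output
 * declared "patch" is shared by the whole patch rather than indexed per
 * vertex, which is why the patch check above comes first.
 */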
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block. This code also supports
       * input/outputs that are just structs
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
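/* Worked example of the two rules above: a block with base Location 4
 * whose three members are decorated { none, Location 7, none } gets
 * member locations { 4, 7, 8 }, assuming each member occupies a single
 * slot; a multi-slot member instead advances the counter by its
 * glsl_count_attribute_slots() value.
 */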
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer, nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (without_array->base_type == vtn_base_type_image) {
         if (glsl_type_is_image(without_array->glsl_image))
            b->shader->info.num_images++;
         else if (glsl_type_is_sampler(without_array->glsl_image))
            b->shader->info.num_textures++;
      }
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   val->pointer = rzalloc(b, struct vtn_pointer);
   val->pointer->mode = var->mode;
   val->pointer->type = var->type;
   val->pointer->ptr_type = ptr_type;
   val->pointer->var = var;
   val->pointer->access = var->type->access;

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_atomic_counter:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->interface_type = var->var->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;

      break;

   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_var_mem_shared;
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = vtn_type_get_nir_type(b, iface_type,
                                                          var->mode);

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ssbo) {
      /* SSBOs and images are assumed to not alias in the Simple, GLSL and Vulkan memory models */
      var->var->data.access |= b->mem_model != SpvMemoryModelOpenCL ? ACCESS_RESTRICT : 0;
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   /* Propagate access flags from the OpVariable decorations. */
   val->pointer->access |= var->access;

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo ||
       var->mode == vtn_variable_mode_atomic_counter) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(glsl_without_array(var->var->type)))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
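/* For reference, a minimal instruction sequence that lands in
 * vtn_create_variable (IDs are illustrative):
 *
 *    %f32  = OpTypeFloat 32
 *    %ptr  = OpTypePointer Private %f32
 *    %zero = OpConstant %f32 0
 *    %var  = OpVariable %ptr Private %zero
 *
 * Here ptr_type is %ptr, storage_class is Private, and %zero arrives as
 * const_initializer.
 */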
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
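/* Examples: shrinking a vec4 to 2 components keeps (x, y); growing a
 * vec2 to 4 components yields (x, y, 0, 0).  Only the component count
 * changes; the bit size is left alone.
 */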
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
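/* Worked example: bitcasting a 3-component 32-bit vector to a
 * 2-component 64-bit vector first zero-pads the source to 4 components
 * (vtn_align_u32(3, 64 / 32) == 4), then bitcasts those to two 64-bit
 * components.  In the other direction, extra components produced by the
 * bitcast are trimmed by the final nir_shrink_zero_pad_vec() call.
 */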
static bool
vtn_get_mem_operands(struct vtn_builder *b, const uint32_t *w, unsigned count,
                     unsigned *idx, SpvMemoryAccessMask *access, unsigned *alignment,
                     SpvScope *dest_scope, SpvScope *src_scope)
{
   *access = 0;
   *alignment = 0;
   if (*idx >= count)
      return false;

   *access = w[(*idx)++];
   if (*access & SpvMemoryAccessAlignedMask) {
      vtn_assert(*idx < count);
      *alignment = w[(*idx)++];
   }

   if (*access & SpvMemoryAccessMakePointerAvailableMask) {
      vtn_assert(*idx < count);
      vtn_assert(dest_scope);
      *dest_scope = vtn_constant_uint(b, w[(*idx)++]);
   }

   if (*access & SpvMemoryAccessMakePointerVisibleMask) {
      vtn_assert(*idx < count);
      vtn_assert(src_scope);
      *src_scope = vtn_constant_uint(b, w[(*idx)++]);
   }

   return true;
}
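/* Example: for an OpLoad whose memory operands are
 *
 *    Aligned | MakePointerVisible, 16, %scope
 *
 * the caller points *idx at the mask word; we consume the mask, the
 * literal alignment (16), and the scope ID, leaving *idx just past the
 * memory operands, and report each piece through the out parameters.
 */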
static enum gl_access_qualifier
spv_access_to_gl_access(SpvMemoryAccessMask access)
{
   if (access & SpvMemoryAccessVolatileMask)
      return ACCESS_VOLATILE;

   return 0;
}

SpvMemorySemanticsMask
vtn_mode_to_memory_semantics(enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
      return SpvMemorySemanticsUniformMemoryMask;
   case vtn_variable_mode_workgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   case vtn_variable_mode_cross_workgroup:
      return SpvMemorySemanticsCrossWorkgroupMemoryMask;
   case vtn_variable_mode_atomic_counter:
      return SpvMemorySemanticsAtomicCounterMemoryMask;
   case vtn_variable_mode_image:
      return SpvMemorySemanticsImageMemoryMask;
   case vtn_variable_mode_output:
      return SpvMemorySemanticsOutputMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}
static void
vtn_emit_make_visible_barrier(struct vtn_builder *b, SpvMemoryAccessMask access,
                              SpvScope scope, enum vtn_variable_mode mode)
{
   if (!(access & SpvMemoryAccessMakePointerVisibleMask))
      return;

   vtn_emit_memory_barrier(b, scope, SpvMemorySemanticsMakeVisibleMask |
                                     SpvMemorySemanticsAcquireMask |
                                     vtn_mode_to_memory_semantics(mode));
}

static void
vtn_emit_make_available_barrier(struct vtn_builder *b, SpvMemoryAccessMask access,
                                SpvScope scope, enum vtn_variable_mode mode)
{
   if (!(access & SpvMemoryAccessMakePointerAvailableMask))
      return;

   vtn_emit_memory_barrier(b, scope, SpvMemorySemanticsMakeAvailableMask |
                                     SpvMemorySemanticsReleaseMask |
                                     vtn_mode_to_memory_semantics(mode));
}
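/* Together these two helpers implement the availability/visibility side
 * of the Vulkan memory model: a load with MakePointerVisible is preceded
 * by an acquire barrier, and a store with MakePointerAvailable is
 * followed by a release barrier, each restricted to the storage class
 * actually being accessed via vtn_mode_to_memory_semantics().
 */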
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class, const_initializer, var_initializer);

      break;
   }
   case SpvOpConstantSampler: {
      /* Synthesize a pointer-to-sampler type, create a variable of that type,
       * and give the variable a constant initializer with the sampler params */
      struct vtn_type *sampler_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      struct vtn_type *ptr_type = rzalloc(b, struct vtn_type);
      ptr_type->base_type = vtn_base_type_pointer;
      ptr_type->deref = sampler_type;
      ptr_type->storage_class = SpvStorageClassUniform;

      ptr_type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      vtn_create_variable(b, val, ptr_type, ptr_type->storage_class, NULL, NULL);

      nir_variable *nir_var = val->pointer->var->var;
      nir_var->data.sampler.is_inline_sampler = true;
      nir_var->data.sampler.addressing_mode = w[3];
      nir_var->data.sampler.normalized_coordinates = w[4];
      nir_var->data.sampler.filter_mode = w[5];

      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain || opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_pointer *base =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      struct vtn_pointer *ptr = vtn_pointer_dereference(b, base, chain);
      ptr->ptr_type = ptr_type;
      ptr->access |= access;
      vtn_push_pointer(b, w[2], ptr);
      break;
   }
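   /* Example: for
    *
    *    %p = OpAccessChain %ptr %base %c2 %i
    *
    * where %c2 is an OpConstant and %i is a dynamic value, link[0] becomes
    * a literal (struct member selection must be constant in SPIR-V) and
    * link[1] carries the ID %i to be resolved when the chain is lowered to
    * a deref.
    */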
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      unsigned idx = 3, dest_alignment, src_alignment;
      SpvMemoryAccessMask dest_access, src_access;
      SpvScope dest_scope, src_scope;
      vtn_get_mem_operands(b, w, count, &idx, &dest_access, &dest_alignment,
                           &dest_scope, &src_scope);
      if (!vtn_get_mem_operands(b, w, count, &idx, &src_access, &src_alignment,
                                NULL, &src_scope)) {
         src_alignment = dest_alignment;
         src_access = dest_access;
      }

      vtn_emit_make_visible_barrier(b, src_access, src_scope, src->pointer->mode);

      vtn_variable_copy(b, dest->pointer, src->pointer,
                        spv_access_to_gl_access(dest_access),
                        spv_access_to_gl_access(src_access));

      vtn_emit_make_available_barrier(b, dest_access, dest_scope, dest->pointer->mode);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type = vtn_get_type(b, w[1]);
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      unsigned idx = 4, alignment;
      SpvMemoryAccessMask access;
      SpvScope scope;
      vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, NULL, &scope);

      vtn_emit_make_visible_barrier(b, access, scope, src->mode);

      vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src, spv_access_to_gl_access(access)));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool. Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest, 0);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      unsigned idx = 3, alignment;
      SpvMemoryAccessMask access;
      SpvScope scope;
      vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, &scope, NULL);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest, spv_access_to_gl_access(access));

      vtn_emit_make_available_barrier(b, access, scope, dest->mode);
      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      vtn_push_nir_ssa(b, w[2], array_length);
      break;
   }
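   /* Arithmetic example for the expression above: with a 256-byte buffer,
    * a member offset of 16, and an array stride of 4, the reported length
    * is max(256 - 16, 0) / 4 = 60 elements.
    */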
   case SpvOpConvertPtrToU: {
      struct vtn_type *u_type = vtn_get_type(b, w[1]);
      struct vtn_type *ptr_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertPtrToU can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
      vtn_push_nir_ssa(b, w[2], u);
      break;
   }

   case SpvOpConvertUToPtr: {
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
      struct vtn_type *u_type = vtn_get_value_type(b, w[3]);

      vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
                  ptr_type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
                  u_type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
      nir_ssa_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
      vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, ptr, ptr_type));
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}