/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>

static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}

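/* Applies decorations attached to the SPIR-V value (currently just
 * NonUniform) to the pointer's access mask, copying the pointer when new
 * flags would be added so they don't leak to other users of the pointer.
 */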
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}

struct vtn_value *
vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}

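/* Decoration callback that propagates NonUniform onto whichever kind of
 * value (SSA, pointer, sampled image, or image pointer) carries the access
 * mask.
 */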
static void
ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ctx)
{
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      if (val->value_type == vtn_value_type_ssa) {
         val->ssa->access |= ACCESS_NON_UNIFORM;
      } else if (val->value_type == vtn_value_type_pointer) {
         val->pointer->access |= ACCESS_NON_UNIFORM;
      } else if (val->value_type == vtn_value_type_sampled_image) {
         val->sampled_image->image->access |= ACCESS_NON_UNIFORM;
      } else if (val->value_type == vtn_value_type_image_pointer) {
         val->image->image->access |= ACCESS_NON_UNIFORM;
      }
      break;

   default:
      break;
   }
}

struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
             struct vtn_type *type, struct vtn_ssa_value *ssa)
{
   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_value_pointer(b, value_id,
                                   vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
   }
   vtn_foreach_decoration(b, val, ssa_decoration_cb, NULL);
   return val;
}

void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
               uint32_t dst_value_id)
{
   struct vtn_value *src = vtn_untyped_value(b, src_value_id);
   struct vtn_value *dst = vtn_push_value(b, dst_value_id, src->value_type);
   struct vtn_value src_copy = *src;

   vtn_fail_if(dst->type->id != src->type->id,
               "Result Type must equal Operand type");

   src_copy.name = dst->name;
   src_copy.decoration = dst->decoration;
   src_copy.type = dst->type;
   *dst = src_copy;

   vtn_foreach_decoration(b, dst, ssa_decoration_cb, NULL);
}

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}

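/* Returns true if pointers in this mode are lowered to an explicit
 * block-index/byte-offset pair rather than NIR derefs: UBOs and SSBOs when
 * the driver asks for offset-based lowering, and push constants always.
 */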
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}

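/* Converts one access-chain link to a NIR SSA index scaled by the given
 * stride.  Literal links become immediates; SSA links are converted to the
 * requested bit size and multiplied.
 */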
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}

static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}

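/* Emits a vulkan_resource_index intrinsic that turns the variable's
 * (descriptor set, binding) pair plus an optional array index into an
 * opaque resource index for later descriptor loads.
 */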
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

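/* Emits vulkan_resource_reindex to advance an existing resource index by an
 * additional array offset, e.g. when an OpPtrAccessChain steps through an
 * array of UBOs/SSBOs.
 */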
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

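/* Emits load_vulkan_descriptor to turn a resource index into an actual
 * descriptor value that block loads/stores can use.
 */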
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}

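/* For example (illustrative sketch, not from the original source): given
 *
 *    %p = OpAccessChain %ptr_ssbo %var %i %j
 *
 * on an array of SSBOs, the %i link selects the descriptor because it lies
 * outside the Block-decorated struct, while %j becomes an in-buffer deref.
 * The function below implements that split for the NIR-deref path; see the
 * comment in its body for the spec rule this relies on.
 */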
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * messed up.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off with at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off with at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}

/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}

nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}

nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                   dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}

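/* Emits a single load/store intrinsic of the given op for one vector or
 * scalar: sets the write mask for stores, push-constant base/range, access
 * qualifiers, and alignment, and converts booleans back from 32-bit
 * integers on load.
 */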
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

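/* Recursively walks a block type, splitting a composite load/store into the
 * per-vector _vtn_load_store_tail calls implied by the explicit SPIR-V
 * offsets and strides.  Row-major matrices are handled by transposing
 * around column-wise operations.
 */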
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}

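/* Loads a value through an offset-based (UBO/SSBO/push-constant/workgroup)
 * pointer by picking the matching intrinsic and delegating to
 * _vtn_block_load_store.
 */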
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}

void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}

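/* Deref-path counterpart of the block load/store: recurses through arrays
 * and structs and emits NIR deref loads/stores at each vector or scalar
 * leaf.
 */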
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

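/* Maps a SPIR-V BuiltIn onto a NIR varying slot, fragment result, or system
 * value, adjusting the variable mode where the builtin dictates it.
 */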
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      if (b->options && b->options->tess_levels_are_sysvals &&
          *mode == nir_var_shader_in) {
         *location = SYSTEM_VALUE_TESS_LEVEL_OUTER;
         set_mode_system_value(b, mode);
      } else {
         *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      }
      break;
   case SpvBuiltInTessLevelInner:
      if (b->options && b->options->tess_levels_are_sysvals &&
          *mode == nir_var_shader_in) {
         *location = SYSTEM_VALUE_TESS_LEVEL_INNER;
         set_mode_system_value(b, mode);
      } else {
         *location = VARYING_SLOT_TESS_LEVEL_INNER;
      }
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}

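/* Applies a single SPIR-V decoration to nir_variable_data; decorations that
 * only make sense on types or on the vtn_variable itself are ignored or
 * warned about here.
 */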
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         /* Since the compact flag is only valid on arrays, don't set it if
          * we are lowering TessLevelInner/Outer to vec4/vec2. */
         if (!b->options || !b->options->lower_tess_levels_to_vec)
            var_data->compact = true;
         break;
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb.buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb.stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

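/* Main decoration callback for variables: handles vtn_variable-level
 * decorations first, then Location (which needs special accumulation for
 * split structures), and finally forwards the rest to apply_var_decoration.
 */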
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }
      return;
   }

   if (vtn_var->var) {
      if (vtn_var->var->num_members == 0) {
         /* We call this function on types as well as variables and not all
          * struct types get split so we can end up having stray member
          * decorations; just ignore them.
          */
         if (member == -1)
            apply_var_decoration(b, &vtn_var->var->data, dec);
      } else if (member >= 0) {
         /* Member decorations must come from a type */
         assert(val->value_type == vtn_value_type_type);
         apply_var_decoration(b, &vtn_var->var->members[member], dec);
      } else {
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++)
            apply_var_decoration(b, &vtn_var->var->members[i], dec);
      }
   } else {
      /* A few variables, those with external storage, have no actual
       * nir_variables associated with them.  Fortunately, all decorations
       * we care about for those variables are on the type only.
       */
      vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                 vtn_var->mode == vtn_variable_mode_ssbo ||
                 vtn_var->mode == vtn_variable_mode_push_constant);
   }
}

static void
var_decoration_tess_level_vec_cb(
   struct vtn_builder *b, struct vtn_value *val, int member,
   const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;
   if (dec->decoration == SpvDecorationBuiltIn) {
      SpvBuiltIn builtin = dec->operands[0];
      if (builtin == SpvBuiltInTessLevelOuter) {
         vtn_var->var->type = glsl_vector_type(GLSL_TYPE_FLOAT, 4);
      } else if (builtin == SpvBuiltInTessLevelInner) {
         vtn_var->var->type = glsl_vector_type(GLSL_TYPE_FLOAT, 2);
      }
   }
}

1853 vtn_storage_class_to_mode(struct vtn_builder
*b
,
1854 SpvStorageClass
class,
1855 struct vtn_type
*interface_type
,
1856 nir_variable_mode
*nir_mode_out
)
1858 enum vtn_variable_mode mode
;
1859 nir_variable_mode nir_mode
;
1861 case SpvStorageClassUniform
:
1862 /* Assume it's an UBO if we lack the interface_type. */
1863 if (!interface_type
|| interface_type
->block
) {
1864 mode
= vtn_variable_mode_ubo
;
1865 nir_mode
= nir_var_mem_ubo
;
1866 } else if (interface_type
->buffer_block
) {
1867 mode
= vtn_variable_mode_ssbo
;
1868 nir_mode
= nir_var_mem_ssbo
;
1870 /* Default-block uniforms, coming from gl_spirv */
1871 mode
= vtn_variable_mode_uniform
;
1872 nir_mode
= nir_var_uniform
;
1875 case SpvStorageClassStorageBuffer
:
1876 mode
= vtn_variable_mode_ssbo
;
1877 nir_mode
= nir_var_mem_ssbo
;
1879 case SpvStorageClassPhysicalStorageBuffer
:
1880 mode
= vtn_variable_mode_phys_ssbo
;
1881 nir_mode
= nir_var_mem_global
;
1883 case SpvStorageClassUniformConstant
:
1884 if (b
->shader
->info
.stage
== MESA_SHADER_KERNEL
) {
1885 if (b
->options
->constant_as_global
) {
1886 mode
= vtn_variable_mode_cross_workgroup
;
1887 nir_mode
= nir_var_mem_global
;
1889 mode
= vtn_variable_mode_ubo
;
1890 nir_mode
= nir_var_mem_ubo
;
1893 mode
= vtn_variable_mode_uniform
;
1894 nir_mode
= nir_var_uniform
;
1897 case SpvStorageClassPushConstant
:
1898 mode
= vtn_variable_mode_push_constant
;
1899 nir_mode
= nir_var_uniform
;
1901 case SpvStorageClassInput
:
1902 mode
= vtn_variable_mode_input
;
1903 nir_mode
= nir_var_shader_in
;
1905 case SpvStorageClassOutput
:
1906 mode
= vtn_variable_mode_output
;
1907 nir_mode
= nir_var_shader_out
;
1909 case SpvStorageClassPrivate
:
1910 mode
= vtn_variable_mode_private
;
1911 nir_mode
= nir_var_shader_temp
;
1913 case SpvStorageClassFunction
:
1914 mode
= vtn_variable_mode_function
;
1915 nir_mode
= nir_var_function_temp
;
1917 case SpvStorageClassWorkgroup
:
1918 mode
= vtn_variable_mode_workgroup
;
1919 nir_mode
= nir_var_mem_shared
;
1921 case SpvStorageClassAtomicCounter
:
1922 mode
= vtn_variable_mode_uniform
;
1923 nir_mode
= nir_var_uniform
;
1925 case SpvStorageClassCrossWorkgroup
:
1926 mode
= vtn_variable_mode_cross_workgroup
;
1927 nir_mode
= nir_var_mem_global
;
1929 case SpvStorageClassImage
:
1930 mode
= vtn_variable_mode_image
;
1931 nir_mode
= nir_var_mem_ubo
;
1933 case SpvStorageClassGeneric
:
1935 vtn_fail("Unhandled variable storage class: %s (%u)",
1936 spirv_storageclass_to_string(class), class);
1940 *nir_mode_out
= nir_mode
;
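
/* Illustrative mapping (added commentary): a GLSL declaration such as
 *
 *    layout(set = 0, binding = 0) uniform Block { vec4 v; };
 *
 * reaches SPIR-V as StorageClassUniform with a Block-decorated interface
 * type, so the switch above selects vtn_variable_mode_ubo/nir_var_mem_ubo,
 * while the same declaration with "buffer" instead of "uniform" becomes
 * StorageClassStorageBuffer and maps to the SSBO mode.
 */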
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* Fall through. */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
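
/* Commentary (added): the driver picks these formats when it fills in the
 * spirv_to_nir options; for example, a Vulkan driver that describes UBOs as
 * (block index, byte offset) pairs would typically set ubo_addr_format to
 * nir_address_format_32bit_index_offset.  The shader-local modes at the
 * bottom of the switch always use logical addressing.
 */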
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be a SSBO binding variable using the
          * PhysicalStorageBuffer storage class.  This assumption appears to
          * be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used for storage buffers.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to the
             * variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
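
/* Commentary (added): on the offset-based path, a UBO or SSBO pointer is
 * flattened to a vec2 of (block_index, byte offset) and a workgroup pointer
 * to a single 32-bit offset; the deref-based path simply returns the SSA
 * destination of the NIR deref chain, or just the block index for
 * whole-block external pointers.  vtn_pointer_from_ssa below performs the
 * inverse transformation.
 */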
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be a SSBO binding variable using the
          * PhysicalStorageBuffer storage class.  This assumption appears to
          * be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used for storage buffers.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
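
/* Commentary (added): this is the inverse of vtn_pointer_to_ssa and is what
 * makes variable pointers and OpConvertUToPtr work: an SSA value that was
 * produced by flattening a pointer (or supplied directly by the client for
 * PhysicalStorageBuffer) is re-wrapped as a vtn_pointer, with the mode and
 * stride information recovered from its SPIR-V pointer type.
 */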
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
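
/* Example (added commentary): geometry and tessellation inputs such as
 *
 *    in gl_PerVertex { vec4 gl_Position; } gl_in[];
 *
 * carry an outer per-vertex array dimension, which is why those stages
 * return true above.  Tessellation-control outputs are likewise arrayed
 * per vertex, while patch-decorated variables never are.
 */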
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * input/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
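
/* Worked example (added commentary): for a block with three members where
 * only member 0 carries Location 4 and the rest are undecorated, the loop
 * above assigns member 0 -> 4, member 1 -> 5, member 2 -> 6.  It advances
 * by glsl_count_attribute_slots(), so a mat4 member, which occupies four
 * slots, pushes the next member's location four slots ahead instead of one.
 */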
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer, nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   val->pointer = rzalloc(b, struct vtn_pointer);
   val->pointer->mode = var->mode;
   val->pointer->type = var->type;
   val->pointer->ptr_type = ptr_type;
   val->pointer->var = var;
   val->pointer->access = var->type->access;

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* Need to tweak the nir type here because at vtn_handle_type time
          * we don't have access to the storage class, which is what tells
          * us that this is an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      break;

   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* Workgroup variables don't have any explicit layout but some
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_var_mem_shared;
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   if (b->options && b->options->lower_tess_levels_to_vec)
      vtn_foreach_decoration(b, val, var_decoration_tess_level_vec_cb, var);

   /* Propagate access flags from the OpVariable decorations. */
   val->pointer->access |= var->access;

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
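
/* Worked example (added commentary): given a 32-bit vec2 (x, y), asking for
 * num_components == 4 yields (x, y, 0, 0), while asking for
 * num_components == 1 keeps just (x).  Despite the name, the helper both
 * shrinks and zero-pads.
 */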
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
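
/* Worked example (added commentary): casting a 32-bit uvec3 to a 64-bit
 * uvec2 first zero-pads the source to 4 components (so 4 * 32 = 2 * 64 bits
 * survive the cast), then bitcasts to two 64-bit components.  Going the
 * other way, a 64-bit scalar cast to a 32-bit uvec2 needs no padding and
 * simply splits into its low and high halves.
 */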
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class,
                          const_initializer, var_initializer);

      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         access |= vtn_value_access(link_val);
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         val->sampled_image->image =
            vtn_decorate_pointer(b, val, val->sampled_image->image);
         val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         ptr->access |= access;
         vtn_push_value_pointer(b, w[2], ptr);
      }
      break;
   }
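
   /* Worked example (added commentary, illustrative): for GLSL
    * "ssbo.arr[i].field", glslang emits OpAccessChain with indices
    * (member index of arr, i, member index of field).  In the loop above
    * the constant member indices become vtn_access_mode_literal links,
    * the dynamic index i becomes a vtn_access_mode_id link, and any
    * NonUniform-ness on the indices is ORed into the resulting pointer's
    * access flags via vtn_value_access().
    */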
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (res_type->base_type == vtn_base_type_image ||
          res_type->base_type == vtn_base_type_sampler) {
         vtn_push_value_pointer(b, w[2], src);
         return;
      } else if (res_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image = val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, src);
         return;
      }

      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeVisibleMask |
               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);

            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to work around the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            struct vtn_value *v = vtn_untyped_value(b, w[2]);
            if (v->value_type == vtn_value_type_sampled_image) {
               dest->var->copy_prop_sampler = v->sampled_image->sampler;
            } else {
               vtn_assert(v->value_type == vtn_value_type_pointer);
               dest->var->copy_prop_sampler = v->pointer;
            }
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);

      if (count > 3) {
         unsigned idx = 4;
         SpvMemoryAccessMask access = w[3];

         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerAvailableMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeAvailableMask |
               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }
      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
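
   /* Worked example (added commentary): for a runtime array with stride 16
    * in a block whose fixed part ends at offset 16, a 256-byte buffer gives
    * max(256 - 16, 0) / 16 = 15 array elements.  The max() with 0 keeps a
    * buffer smaller than the fixed part of the block from producing a
    * wrapped-around, nonsensical length.
    */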
   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      struct vtn_ssa_value *ptr_ssa = vtn_ssa_value(b, w[3]);

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def,
                                           u_val->type->type);
      u_val->ssa->access |= ptr_ssa->access;
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_untyped_value(b, w[3]);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      struct vtn_ssa_value *u_ssa = vtn_ssa_value(b, w[3]);
      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
      ptr_val->pointer->access |= u_val->ssa->access;
      break;
   }
   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}