/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}
struct vtn_value *
vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}
static void
ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ssa)
{
   struct vtn_ssa_value *ssa = void_ssa;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ssa->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
             struct vtn_type *type, struct vtn_ssa_value *ssa)
{
   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_value_pointer(b, value_id,
                                   vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
      vtn_foreach_decoration(b, val, ssa_decoration_cb, val->ssa);
   }
   return val;
}
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
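
/* A worked example of the sizing math above (illustrative, not from the
 * original source): struct vtn_access_chain embeds one link in its flexible
 * array, so for length == 3 we allocate
 *    sizeof(*chain) + 2 * sizeof(chain->link[0]).
 * The MAX2(length, 1) keeps a length of 0 from underflowing the subtraction.
 */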
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
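
/* Illustration of the helper above (assumed values, not from the original
 * source): for a literal link with link.id == 3 and stride == 16 it emits a
 * constant, nir_imm_intN_t(&b->nb, 48, bit_size).  For an SSA link whose
 * index is 64-bit while bit_size == 32, it first emits an i2i conversion to
 * 32 bits and then an imul_imm by the stride.
 */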
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
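
/* Rough sketch of what the function above emits for a hypothetical SSBO
 * declared with set = 0, binding = 2 (names and printed form are
 * illustrative only, not exact NIR print syntax):
 *
 *    ssa_0 = load_const (0)
 *    ssa_1 = intrinsic vulkan_resource_index (ssa_0)
 *            (desc_set = 0, binding = 2, desc_type = STORAGE_BUFFER)
 *
 * The driver later lowers this intrinsic to its own descriptor layout.
 */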
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
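
/* The three helpers above implement Vulkan's descriptor dance.  Roughly, for
 * an access like ssbos[i].member (hypothetical example):
 *
 *    index = vulkan_resource_index(i)         -- pick the binding
 *    index = vulkan_resource_reindex(index)   -- optional extra array indexing
 *    desc  = load_vulkan_descriptor(index)    -- turn it into a pointer value
 *
 * after which normal deref or offset arithmetic takes over.
 */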
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * misused.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
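
/* A rough example of the function above (illustrative only): for a SPIR-V
 * OpAccessChain %ssbo_array %i %j %k where the Block-decorated struct sits
 * one array level down, %i is consumed as a descriptor array index, a
 * load_vulkan_descriptor is emitted and cast to a deref, and %j and %k
 * become deref_array/deref_struct instructions built on top of that cast.
 */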
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
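
/* Worked example of the offset arithmetic above (assumed std430-style
 * layout, not from the original source): for a chain selecting member 1 at
 * offset 16 of a struct and then array element i with stride 4, we emit
 *    offset = iadd(iadd_imm(base_offset, 16), imul_imm(i, 4))
 * with the block index carried along unchanged.
 */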
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
/* Returns an atomic_uint type based on the original uint type.  The
 * returned type will be equivalent to the original one but will have an
 * atomic_uint type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
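
/* Example of the repair above: uint becomes atomic_uint, and uint[4][2]
 * becomes atomic_uint[4][2], with array lengths and explicit strides
 * preserved at every level.  Only the scalar leaf type changes.
 */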
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
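
/* Example for the helper above: for GLSL "vec4 v; ... v[2] = x;" SPIR-V may
 * emit an OpAccessChain whose last index selects component 2.  The deref
 * tail is then the vector itself, and the callers below use
 * nir_vector_extract/nir_vector_insert instead of an array deref of the
 * vector.
 */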
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                   dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
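
/* Worked example for the sizing above (hypothetical std140-style block):
 *
 *    struct { vec3 a; float b; mat4 m; }   -- offsets 0, 12, 16
 *
 * The struct case takes the maximum field end: offsets[2] (16) plus the
 * matrix size (4 columns * stride 16 = 64), i.e. 80 bytes total.
 */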
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load
       * the data.
       */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
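
/* Note on the row-major path above: a row-major matrix is loaded one row at
 * a time (num_ops rows of vec_width components, stepping by the column
 * stride) and then transposed with vtn_ssa_transpose, so the resulting SSA
 * value is always column-major regardless of the buffer layout.  The actual
 * strides come from the SPIR-V MatrixStride decorations.
 */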
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we
       * use SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb.buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb.stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not
             * all struct types get split so we can end up having stray
             * member decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant);
      }
   }
}
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's an UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBuffer:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         if (b->options->constant_as_global) {
            mode = vtn_variable_mode_cross_workgroup;
            nir_mode = nir_var_mem_global;
         } else {
            mode = vtn_variable_mode_ubo;
            nir_mode = nir_var_mem_ubo;
         }
      } else {
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
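/* Returns the nir_address_format the driver asked us to use for pointers in
 * the given variable mode; modes backed by NIR derefs are logical.
 */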
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* Fall through. */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
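/* Flattens a vtn_pointer into a single NIR SSA value: a block_index/offset
 * vec2 for UBOs and SSBOs in offset mode, a lone offset for workgroup
 * memory, a block index for external blocks, or the SSA destination of the
 * NIR deref otherwise.
 */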
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * pointer.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client. This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class. This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage
          * class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
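/* The inverse of vtn_pointer_to_ssa: reconstructs a vtn_pointer of the given
 * SPIR-V pointer type from its flat NIR SSA representation.
 */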
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block. Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block. It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client. This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class. This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage
          * class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
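/* Returns true if the given input or output variable is arrayed per-vertex
 * in this shader stage (tessellation and geometry I/O).
 */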
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
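/* Walks the members of a split I/O struct and assigns a location to every
 * member that was not explicitly decorated, following the Vulkan rules
 * quoted in the loop below.
 */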
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       * of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       * location. Each remaining member is assigned the location after the
       * immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block. This code also supports
       * inputs/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
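/* Does the bulk of the work for OpVariable: picks the variable mode, sets up
 * the vtn_variable and its pointer value, creates the backing nir_variable
 * where one is needed (external blocks like push constants have none),
 * splits per-vertex I/O structs into per-member data, and applies all of
 * the variable's decorations.
 */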
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer,
                    nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);
   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }
   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   val->pointer = rzalloc(b, struct vtn_pointer);
   val->pointer->mode = var->mode;
   val->pointer->type = var->type;
   val->pointer->ptr_type = ptr_type;
   val->pointer->var = var;
   val->pointer->access = var->type->access;
   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* Need to tweak the nir type here as at vtn_handle_type we don't
          * have access to storage_class, which is what tells us this is an
          * atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;

      break;

   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* Workgroup variables don't have any explicit layout but some
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_var_mem_shared;
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier. This means walking the variable decorations
       * early before we actually create any variables. Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can. In particular, if you have a struct, it
       * will place the patch decorations on the struct members. This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever. Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures. This
       * is for a couple of reasons. For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables. For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays. However, some builtins come in
          * non-per-vertex, hence the need for the is_array check. In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V. We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct. Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }
   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   /* Propagate access flags from the OpVariable decorations. */
   val->pointer->access |= var->access;

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }
   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }
   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
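/* Verifies that the destination and source types of a copy-like opcode
 * match, accepting compatible types with mismatched IDs to work around old
 * GLSLang bugs (see the links in the body).
 */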
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
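/* Returns a vector with exactly num_components components: extra source
 * components are dropped and missing ones are filled with zero immediates.
 */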
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
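/* Bit-casts a value to a vector type of a possibly different bit size and
 * component count, zero-padding the source first so no bits are lost and
 * trimming or zero-padding the result to the requested width.
 */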
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
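/* Main entry point for the variable-related opcodes: OpUndef, OpVariable,
 * the access-chain opcodes, OpCopyMemory, OpLoad, OpStore, OpArrayLength,
 * and the pointer<->integer conversions.
 */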
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class, const_initializer,
                          var_initializer);

      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         access |= vtn_value_access(link_val);
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane. SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         val->sampled_image->image =
            vtn_decorate_pointer(b, val, val->sampled_image->image);
         val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         ptr->access |= access;
         vtn_push_value_pointer(b, w[2], ptr);
      }
      break;
   }
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (res_type->base_type == vtn_base_type_image ||
          res_type->base_type == vtn_base_type_sampler) {
         vtn_push_value_pointer(b, w[2], src);
         return;
      } else if (res_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image = val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, src);
         return;
      }

      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeVisibleMask |
               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);

            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool. Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool. Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
                     "propagation to workaround the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);

            struct vtn_value *v = vtn_untyped_value(b, w[2]);
            if (v->value_type == vtn_value_type_sampled_image) {
               dest->var->copy_prop_sampler = v->sampled_image->sampler;
            } else {
               vtn_assert(v->value_type == vtn_value_type_pointer);
               dest->var->copy_prop_sampler = v->pointer;
            }
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);

      if (count > 3) {
         unsigned idx = 4;
         SpvMemoryAccessMask access = w[3];

         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerAvailableMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeAvailableMask |
               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      struct vtn_ssa_value *ptr_ssa = vtn_ssa_value(b, w[3]);

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def, u_val->type->type);
      u_val->ssa->access |= ptr_ssa->access;
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_untyped_value(b, w[3]);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      struct vtn_ssa_value *u_ssa = vtn_ssa_value(b, w[3]);
      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
      ptr_val->pointer->access |= u_val->ssa->access;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}