/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static struct vtn_pointer *
vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
                     struct vtn_pointer *ptr)
{
   struct vtn_pointer dummy = { .access = 0 };
   vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);

   /* If we're adding access flags, make a copy of the pointer.  We could
    * probably just OR them in without doing so but this prevents us from
    * leaking them any further than actually specified in the SPIR-V.
    */
   if (dummy.access & ~ptr->access) {
      struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
      *copy = *ptr;
      copy->access |= dummy.access;
      return copy;
   }

   return ptr;
}
struct vtn_value *
vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = vtn_decorate_pointer(b, val, ptr);
   return val;
}
static void
ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ssa)
{
   struct vtn_ssa_value *ssa = void_ssa;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ssa->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
             struct vtn_type *type, struct vtn_ssa_value *ssa)
{
   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_value_pointer(b, value_id,
                                   vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
      vtn_foreach_decoration(b, val, ssa_decoration_cb, val->ssa);
   }
   return val;
}
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
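/* For illustration (not in the original source): because the struct
 * declaration already contains one built-in link, a chain with
 * length == 3 is allocated as
 *
 *    size = sizeof(*chain) + (3 - 1) * sizeof(chain->link[0]);
 *
 * and rzalloc_size() zero-fills it, so ptr_as_array and all links start
 * out false/0.
 */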
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant;
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
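/* For illustration: a literal link { .mode = vtn_access_mode_literal,
 * .id = 2 } with stride 16 folds to an immediate 32, while an SSA link
 * becomes an i2i cast (only if the bit sizes differ) followed by
 * nir_imul_imm(ssa, 16).
 */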
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
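/* Rough sketch (set/binding values made up for the example) of the NIR this
 * emits for a variable declared in GLSL as
 * layout(set = 0, binding = 1) uniform ...:
 *
 *    ssa_n = intrinsic vulkan_resource_index (ssa_idx)
 *            (desc_set=0, binding=1, desc_type=UBO)
 *
 * The number of components actually depends on the address format the
 * driver chose, as computed above.
 */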
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
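/* Taken together, the three helpers above implement the usual Vulkan
 * descriptor dance.  A sketch (with hypothetical SSA names) of the full
 * sequence they produce for an element of an array of SSBOs:
 *
 *    ssa_1 = intrinsic vulkan_resource_index (i) (set, binding, SSBO)
 *    ssa_2 = intrinsic vulkan_resource_reindex (ssa_1, j)   // if needed
 *    ssa_3 = intrinsic load_vulkan_descriptor (ssa_2) (SSBO)
 */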
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access | deref_chain->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */
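      /* For example (not from the original source), given
       *
       *    layout(set=0, binding=0) uniform U { vec4 v; } u[4][2];
       *
       * the links of an access chain u[i][j].v split at the Block-decorated
       * struct U: i and j select a descriptor (handled with
       * vulkan_resource_index/reindex below) while .v becomes a byte offset
       * into the chosen buffer.
       */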
      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * misused.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }
      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }
   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }
   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
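         /* Concretely (example, not from the original source): for
          *
          *    layout(set=0, binding=0) buffer Block { ... } blk;
          *
          * an OpPtrAccessChain with Element = 1 on a pointer to blk selects
          * descriptor base + 1, i.e. it becomes a vulkan_resource_reindex by
          * 1, not an offset of one block size within the same buffer.
          */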
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM neither needs nor has a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }
   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }
   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }
   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}
/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
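/* For example: uint -> atomic_uint, uint[4] -> atomic_uint[4], and
 * uint[3][2] -> atomic_uint[3][2], with array lengths and explicit strides
 * preserved at every level by the recursion above.
 */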
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
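/* For example (hypothetical GLSL): for v[2] where v is a vec4 variable, the
 * NIR deref chain is deref_var(v) -> deref_array[2].  Since the parent type
 * is a vector, this returns the deref_var, so callers can load/store the
 * whole vec4 and use extract/insert for component 2.
 */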
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
static nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
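/* Worked example (offsets chosen for illustration, std140-style): for a
 * struct { vec3 a; float b; } with offsets[] = {0, 12}, the struct case
 * computes MAX2(0 + 12, 12 + 4) = 16 bytes.
 */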
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
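         /* Illustrative example of the row-major path above: loading a
          * row-major mat2x3 (2 columns x 3 rows, so vector_elements == 3)
          * issues num_ops == 3 loads of vec_width == 2 components each --
          * one per row, strided by the row stride -- and the transpose
          * afterwards restores the column-major shape the rest of NIR
          * expects.
          */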
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;
   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
      }
      return;
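      /* To see the race mentioned above (illustrative): if a component store
       * were lowered to load-vec4 / insert / store-vec4, two invocations
       * writing v[0] and v[1] could each read the old vector and store it
       * back whole, losing one of the writes.  Storing through the deref
       * directly avoids that read-modify-write.
       */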
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      if (b->options->environment == NIR_SPIRV_OPENGL)
         *location = SYSTEM_VALUE_BASE_VERTEX;
      else
         *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordNoPerspSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothCentroidAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordSmoothSampleAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaryCoordPullModelAMD:
      *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      var_data->interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
:
1614 case SpvDecorationRowMajor
:
1615 case SpvDecorationColMajor
:
1616 case SpvDecorationMatrixStride
:
1617 case SpvDecorationAliased
:
1618 case SpvDecorationUniform
:
1619 case SpvDecorationUniformId
:
1620 case SpvDecorationLinkageAttributes
:
1621 break; /* Do nothing with these here */
1623 case SpvDecorationPatch
:
1624 var_data
->patch
= true;
1627 case SpvDecorationLocation
:
1628 vtn_fail("Handled above");
1630 case SpvDecorationBlock
:
1631 case SpvDecorationBufferBlock
:
1632 case SpvDecorationArrayStride
:
1633 case SpvDecorationGLSLShared
:
1634 case SpvDecorationGLSLPacked
:
1635 break; /* These can apply to a type but we don't care about them */
1637 case SpvDecorationBinding
:
1638 case SpvDecorationDescriptorSet
:
1639 case SpvDecorationNoContraction
:
1640 case SpvDecorationInputAttachmentIndex
:
1641 vtn_warn("Decoration not allowed for variable or structure member: %s",
1642 spirv_decoration_to_string(dec
->decoration
));
1645 case SpvDecorationXfbBuffer
:
1646 var_data
->explicit_xfb_buffer
= true;
1647 var_data
->xfb
.buffer
= dec
->operands
[0];
1648 var_data
->always_active_io
= true;
1650 case SpvDecorationXfbStride
:
1651 var_data
->explicit_xfb_stride
= true;
1652 var_data
->xfb
.stride
= dec
->operands
[0];
1654 case SpvDecorationOffset
:
1655 var_data
->explicit_offset
= true;
1656 var_data
->offset
= dec
->operands
[0];
1659 case SpvDecorationStream
:
1660 var_data
->stream
= dec
->operands
[0];
1663 case SpvDecorationCPacked
:
1664 case SpvDecorationSaturatedConversion
:
1665 case SpvDecorationFuncParamAttr
:
1666 case SpvDecorationFPRoundingMode
:
1667 case SpvDecorationFPFastMathMode
:
1668 case SpvDecorationAlignment
:
1669 if (b
->shader
->info
.stage
!= MESA_SHADER_KERNEL
) {
1670 vtn_warn("Decoration only allowed for CL-style kernels: %s",
1671 spirv_decoration_to_string(dec
->decoration
));
1675 case SpvDecorationUserSemantic
:
1676 /* User semantic decorations can safely be ignored by the driver. */
1679 case SpvDecorationRestrictPointerEXT
:
1680 case SpvDecorationAliasedPointerEXT
:
1681 /* TODO: We should actually plumb alias information through NIR. */
1685 vtn_fail_with_decoration("Unhandled decoration", dec
->decoration
);
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }
->value_type
== vtn_value_type_pointer
) {
1742 assert(val
->pointer
->var
== void_var
);
1743 assert(member
== -1);
1745 assert(val
->value_type
== vtn_value_type_type
);
1748 /* Location is odd. If applied to a split structure, we have to walk the
1749 * whole thing and accumulate the location. It's easier to handle as a
1752 if (dec
->decoration
== SpvDecorationLocation
) {
1753 unsigned location
= dec
->operands
[0];
1754 if (b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
&&
1755 vtn_var
->mode
== vtn_variable_mode_output
) {
1756 location
+= FRAG_RESULT_DATA0
;
1757 } else if (b
->shader
->info
.stage
== MESA_SHADER_VERTEX
&&
1758 vtn_var
->mode
== vtn_variable_mode_input
) {
1759 location
+= VERT_ATTRIB_GENERIC0
;
1760 } else if (vtn_var
->mode
== vtn_variable_mode_input
||
1761 vtn_var
->mode
== vtn_variable_mode_output
) {
1762 location
+= vtn_var
->patch
? VARYING_SLOT_PATCH0
: VARYING_SLOT_VAR0
;
1763 } else if (vtn_var
->mode
!= vtn_variable_mode_uniform
) {
1764 vtn_warn("Location must be on input, output, uniform, sampler or "
1769 if (vtn_var
->var
->num_members
== 0) {
1770 /* This handles the member and lone variable cases */
1771 vtn_var
->var
->data
.location
= location
;
1773 /* This handles the structure member case */
1774 assert(vtn_var
->var
->members
);
1777 vtn_var
->base_location
= location
;
1779 vtn_var
->var
->members
[member
].location
= location
;
1785 if (vtn_var
->var
->num_members
== 0) {
1786 /* We call this function on types as well as variables and not all
1787 * struct types get split so we can end up having stray member
1788 * decorations; just ignore them.
1791 apply_var_decoration(b
, &vtn_var
->var
->data
, dec
);
1792 } else if (member
>= 0) {
1793 /* Member decorations must come from a type */
1794 assert(val
->value_type
== vtn_value_type_type
);
1795 apply_var_decoration(b
, &vtn_var
->var
->members
[member
], dec
);
1798 glsl_get_length(glsl_without_array(vtn_var
->type
->type
));
1799 for (unsigned i
= 0; i
< length
; i
++)
1800 apply_var_decoration(b
, &vtn_var
->var
->members
[i
], dec
);
1803 /* A few variables, those with external storage, have no actual
1804 * nir_variables associated with them. Fortunately, all decorations
1805 * we care about for those variables are on the type only.
1807 vtn_assert(vtn_var
->mode
== vtn_variable_mode_ubo
||
1808 vtn_var
->mode
== vtn_variable_mode_ssbo
||
1809 vtn_var
->mode
== vtn_variable_mode_push_constant
);
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's a UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBuffer:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         if (b->options->constant_as_global) {
            mode = vtn_variable_mode_cross_workgroup;
            nir_mode = nir_var_mem_global;
         } else {
            mode = vtn_variable_mode_ubo;
            nir_mode = nir_var_mem_ubo;
         }
      } else {
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
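
/* For illustration: a Vulkan GLSL declaration like
 *
 *    layout(set = 0, binding = 0) uniform UBO { vec4 color; };
 *
 * arrives here as an OpVariable in the Uniform storage class whose struct
 * type carries the Block decoration, so it maps to vtn_variable_mode_ubo /
 * nir_var_mem_ubo.  Declared with "buffer" instead, it uses either
 * Uniform + BufferBlock (SPIR-V before 1.3) or StorageBuffer + Block
 * (1.3 and later), and both paths above end up at vtn_variable_mode_ssbo /
 * nir_var_mem_ssbo.
 */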
nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* fall through */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
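
/* The formats themselves come from the spirv_to_nir options the driver
 * passes in.  As a plausible example (driver-dependent, not fixed by this
 * file): a Vulkan driver might set ubo_addr_format and ssbo_addr_format to
 * nir_address_format_32bit_index_offset (a vec2 of block index plus byte
 * offset) and global_addr_format to nir_address_format_64bit_global (a
 * single 64-bit address).
 */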
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * pointer.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: per the "Shader
          * Resource and Storage Class Correspondence" table, only the
          * Uniform storage class with BufferBlock or the StorageBuffer
          * storage class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
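
/* To summarize the SSA encodings produced above: an offset-based UBO/SSBO
 * pointer becomes a vec2 of (block_index, byte_offset), a workgroup pointer
 * becomes a single scalar byte offset, and deref-based pointers simply
 * reuse the SSA destination of their NIR deref instruction.
 */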
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBuffer pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBuffer storage class.  This assumption appears
          * to be correct according to the Vulkan spec: per the "Shader
          * Resource and Storage Class Correspondence" table, only the
          * Uniform storage class with BufferBlock or the StorageBuffer
          * storage class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
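
/* Example: a tessellation-control input declared in GLSL as
 *
 *    in gl_PerVertex { vec4 gl_Position; } gl_in[];
 *
 * is per-vertex: the outermost array dimension indexes the vertex, not the
 * data.  vtn_create_variable below relies on this to peel one array level
 * off the type (per_vertex_type = var->type->array_element) before
 * splitting the struct.
 */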
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       *   “If the structure type is a Block but without a Location, then each
       *    of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       *   “Any member with its own Location decoration is assigned that
       *    location. Each remaining member is assigned the location after the
       *    immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * input/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
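
/* Worked example: for an output block whose base Location is 2 and whose
 * members are (float a, mat4 b, vec3 c) with no member-level Location
 * decorations, the loop assigns a -> 2, b -> 3, and c -> 7, because
 * glsl_count_attribute_slots() reports one slot for a scalar, four for a
 * mat4, and one for a vec3.
 */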
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *const_initializer,
                    nir_variable *var_initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBuffer storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }
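
   /* Note on the SSBO warning above: well-formed SPIR-V 1.3+ declares an
    * SSBO as a StorageBuffer-class variable whose struct type is decorated
    * Block; the tolerated-but-warned case is a StorageBuffer variable
    * without Block, which some older test shaders emitted.
    */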
   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* Need to tweak the NIR type here because at vtn_handle_type time
          * we don't have access to the storage class, which is what tells
          * us this is really an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;
   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      break;
   case vtn_variable_mode_workgroup:
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* Workgroup variables don't have any explicit layout but some
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_var_mem_shared;
      break;
   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   /* We can only have one type of initializer */
   assert(!(const_initializer && var_initializer));
   if (const_initializer) {
      var->var->constant_initializer =
         nir_constant_clone(const_initializer, var->var);
   }
   if (var_initializer)
      var->var->pointer_initializer = var_initializer;

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
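
/* Despite the name, this helper both shrinks and grows: shrinking a vec4 to
 * two components keeps channels x and y, while growing a single 32-bit value
 * to three components appends two zero constants of the same bit size.
 */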
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
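
/* Worked example: casting a 32-bit uvec2 to a 64-bit scalar (as
 * OpConvertUToPtr below can do under a 64-bit address format) takes the pad
 * branch with src_num_components_needed = align(2, 64/32) = 2, so no
 * padding is added; nir_bitcast_vector then fuses the two 32-bit channels
 * into one 64-bit value and the final shrink is a no-op.  Going the other
 * way, a 64-bit scalar bitcasts to a 32-bit uvec2 with no pad or shrink
 * needed either.
 */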
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *const_initializer = NULL;
      nir_variable *var_initializer = NULL;
      if (count > 4) {
         struct vtn_value *init = vtn_untyped_value(b, w[4]);
         switch (init->value_type) {
         case vtn_value_type_constant:
            const_initializer = init->constant;
            break;
         case vtn_value_type_pointer:
            var_initializer = init->pointer->var->var;
            break;
         default:
            vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
                     w[4]);
         }
      }

      vtn_create_variable(b, val, ptr_type, storage_class,
                          const_initializer, var_initializer);
      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      enum gl_access_qualifier access = 0;
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         access |= vtn_value_access(link_val);
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         val->sampled_image->image =
            vtn_decorate_pointer(b, val, val->sampled_image->image);
         val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         ptr->access |= access;
         vtn_push_value_pointer(b, w[2], ptr);
      }
      break;
   }
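
   /* For illustration: an access like ssbo.arr[i].x typically becomes an
    * OpAccessChain with three indices -- a constant member index (stored as
    * a vtn_access_mode_literal link), the dynamic i (stored as a
    * vtn_access_mode_id link), and another literal for .x -- and any
    * NonUniform decoration on the indices is folded into the pointer's
    * access mask via vtn_value_access().
    */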
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (res_type->base_type == vtn_base_type_image ||
          res_type->base_type == vtn_base_type_sampler) {
         vtn_push_value_pointer(b, w[2], src);
         return;
      } else if (res_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->image = val->sampled_image->sampler =
            vtn_decorate_pointer(b, val, src);
         return;
      }

      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeVisibleMask |
               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);

            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool. Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to workaround the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            struct vtn_value *v = vtn_untyped_value(b, w[2]);
            if (v->value_type == vtn_value_type_sampled_image) {
               dest->var->copy_prop_sampler = v->sampled_image->sampler;
            } else {
               vtn_assert(v->value_type == vtn_value_type_pointer);
               dest->var->copy_prop_sampler = v->pointer;
            }
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);

      if (count > 3) {
         unsigned idx = 4;
         SpvMemoryAccessMask access = w[3];
         if (access & SpvMemoryAccessAlignedMask)
            idx++;

         if (access & SpvMemoryAccessMakePointerAvailableMask) {
            SpvMemorySemanticsMask semantics =
               SpvMemorySemanticsMakeAvailableMask |
               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
            SpvScope scope = vtn_constant_uint(b, w[idx]);
            vtn_emit_memory_barrier(b, scope, semantics);
         }
      }

      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
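
   /* Numeric example of the formula above: with a runtime array of 12-byte
    * structs starting at offset 16 in a 256-byte buffer, get_buffer_size
    * returns 256 and the array length is max(256 - 16, 0) / 12 = 20
    * elements.
    */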
   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      struct vtn_ssa_value *ptr_ssa = vtn_ssa_value(b, w[3]);

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def,
                                           u_val->type->type);
      u_val->ssa->access |= ptr_ssa->access;
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
      ptr_val->pointer->access |= u_val->ssa->access;
      break;
   }
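
   /* OpConvertUToPtr/OpConvertPtrToU are the integer<->pointer casts used
    * with physical storage, e.g. VK_KHR_buffer_device_address, where the
    * application hands the shader a raw 64-bit device address as an integer
    * and the shader casts it to a PhysicalStorageBuffer pointer; hence the
    * "physical pointers" requirement checked above.
    */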
   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}