/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static void ptr_decoration_cb(struct vtn_builder *b,
                              struct vtn_value *val, int member,
                              const struct vtn_decoration *dec,
                              void *void_ptr);

struct vtn_value *
vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_pointer *ptr)
{
   struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
   val->pointer = ptr;
   vtn_foreach_decoration(b, val, ptr_decoration_cb, ptr);
   return val;
}
struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
             struct vtn_type *type, struct vtn_ssa_value *ssa)
{
   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_value_pointer(b, value_id,
                                   vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
   }
   return val;
}
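/* Allocates an access chain with room for `length` links.  struct
 * vtn_access_chain already embeds one link, so (sketching the arithmetic
 * below) a chain of length 3 only needs space for two extra links:
 *
 *    size = sizeof(*chain) + (3 - 1) * sizeof(chain->link[0]);
 *
 * rzalloc_size zeroes the allocation, so every link starts out cleared.
 */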
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
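/* Some modes are lowered to explicit SSA offsets instead of NIR derefs:
 * such a pointer is carried around as a block index plus a byte offset
 * (see vtn_ssa_offset_pointer_dereference below) rather than as a deref
 * chain.  Which modes take this path depends on the lowering options the
 * driver passed in.
 */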
bool
vtn_mode_uses_ssa_offset(struct vtn_builder *b,
                         enum vtn_variable_mode mode)
{
   return ((mode == vtn_variable_mode_ubo ||
            mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          mode == vtn_variable_mode_push_constant ||
          (mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
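/* Builds a vulkan_resource_index intrinsic for the given variable.  The
 * intrinsic takes the descriptor-array index as its one source and carries
 * the variable's descriptor set, binding, and VkDescriptorType as constant
 * indices; the driver later lowers it according to its own binding model.
 */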
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
               var->mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_index");

   nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for vulkan_resource_reindex");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
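/* Turns a descriptor index into an actual descriptor value.  Together with
 * the two helpers above this forms a small pipeline, roughly:
 *
 *    index = vulkan_resource_index(set, binding, array_index)
 *    index = vulkan_resource_reindex(index, extra_index)   (optional)
 *    desc  = load_vulkan_descriptor(index)
 *
 * (Sketch only; the actual operands are built in the helpers.)  The result
 * typically feeds a deref_cast so the rest of the access chain can be
 * expressed as ordinary NIR derefs.
 */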
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
               "Invalid mode for load_vulkan_descriptor");

   nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
   const struct glsl_type *ptr_type =
      nir_address_format_to_glsl_type(addr_format);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
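/* As a concrete (hypothetical) example of the descriptor/buffer split that
 * the function below implements, consider:
 *
 *    layout(set = 0, binding = 0) buffer Blk { vec4 v[]; } blks[4];
 *    ... blks[i].v[j] ...
 *
 * The index i selects a descriptor (vulkan_resource_index / reindex)
 * because it sits outside the Block-decorated struct, while .v[j] comes
 * after the block boundary and becomes a plain NIR deref on the loaded
 * descriptor.
 */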
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (b->options->environment == NIR_SPIRV_VULKAN &&
              vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are used.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}
/* Returns an atomic_uint type based on the original uint type. The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of an uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
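/* Loads through src.  If the deref's tail selects a single vector component
 * (see get_deref_tail above), the containing vector is loaded and the
 * component is then extracted; e.g. (sketch) loading v[2] from a vec4 v
 * becomes a vec4 load followed by vtn_vector_extract(..., 2).
 */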
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}
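/* Stores src through dest.  Per-component stores are emulated with a
 * load + vtn_vector_insert + store of the containing vector, mirroring
 * vtn_local_load above.
 */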
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
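/* Emits a single load/store intrinsic for one scalar or vector value at
 * index/offset.  All of the type recursion happens in the callers; by the
 * time we get here, `type` is expected to be a vector or scalar.
 */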
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ubo ||
       op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
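/* Recursively walks a block type and emits one tail load/store per leaf
 * vector.  Row-major matrices are handled by operating on rows (one per
 * vector element) and transposing, so e.g. a row-major mat4 load becomes
 * four vec4 loads at col_stride intervals followed by vtn_ssa_transpose.
 */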
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      vtn_assert(*mode == nir_var_shader_in);
      if (b->options && b->options->frag_coord_is_sysval) {
         *mode = nir_var_system_value;
         *location = SYSTEM_VALUE_FRAG_COORD;
      } else {
         *location = VARYING_SLOT_POS;
      }
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK,
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK,
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK,
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK,
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("Unsupported builtin: %s (%u)",
               spirv_builtin_to_string(builtin), builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->operands[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->operands[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->operands[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->operands[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->operands[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->operands[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->operands[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->operands[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->operands[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->operands[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->operands[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationCounterBuffer:
      /* Counter buffer decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->operands[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      /* Assume it's an UBO if we lack the interface_type. */
      if (!interface_type || interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBufferEXT:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassImage:
      mode = vtn_variable_mode_image;
      nir_mode = nir_var_mem_ubo;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class: %s (%u)",
               spirv_storageclass_to_string(class), class);
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_address_format
vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_addr_format;

   case vtn_variable_mode_ssbo:
      return b->options->ssbo_addr_format;

   case vtn_variable_mode_phys_ssbo:
      return b->options->phys_ssbo_addr_format;

   case vtn_variable_mode_push_constant:
      return b->options->push_const_addr_format;

   case vtn_variable_mode_workgroup:
      return b->options->shared_addr_format;

   case vtn_variable_mode_cross_workgroup:
      return b->options->global_addr_format;

   case vtn_variable_mode_function:
      if (b->physical_ptrs)
         return b->options->temp_addr_format;
      /* fall through */

   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
   case vtn_variable_mode_image:
      return nir_address_format_logical;
   }

   unreachable("Invalid variable mode");
}
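
/* Note: the address formats above come straight from the spirv_to_nir
 * options, so a caller sketch might look like:
 *
 *    nir_address_format fmt = vtn_mode_to_address_format(b, ptr->mode);
 *
 * where, e.g., a driver that lowers SSBO access to (index, offset) pairs
 * would have set options->ssbo_addr_format accordingly.  Only the
 * function-temp case consults b->physical_ptrs; every mode with no explicit
 * format falls back to nir_address_format_logical.
 */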

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBufferEXT storage class.  This assumption
          * appears to be correct according to the Vulkan spec: in the table
          * "Shader Resource and Storage Class Correspondence," only the
          * Uniform storage class with BufferBlock or the StorageBuffer
          * storage class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to
             * the variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
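
/* Shape of the SSA form produced above, as implemented in this function:
 *
 *    UBO/SSBO with offset lowering:  vec2(block_index, offset)
 *    Workgroup with offset lowering: scalar byte offset
 *    everything else:                the deref's (or block index's) SSA def
 *
 * so, e.g., an SSBO pointer round-trips through the nir_vec2 here and the
 * two nir_channel calls in vtn_pointer_from_ssa below.
 */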

struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   struct vtn_type *without_array =
      vtn_type_without_array(ptr_type->deref);

   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         without_array, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be an SSBO binding variable using
          * the PhysicalStorageBufferEXT storage class.  This assumption
          * appears to be correct according to the Vulkan spec: in the table
          * "Shader Resource and Storage Class Correspondence," only the
          * Uniform storage class with BufferBlock or the StorageBuffer
          * storage class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
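
/* Round-trip sketch (illustrative): vtn_pointer_from_ssa is the inverse of
 * vtn_pointer_to_ssa for a given ptr_type, so code that has to spill a
 * pointer through SSA (e.g. for OpPhi or OpSelect on pointers) can do:
 *
 *    nir_ssa_def *ssa = vtn_pointer_to_ssa(b, ptr);
 *    ...
 *    struct vtn_pointer *ptr2 = vtn_pointer_from_ssa(b, ssa, ptr->ptr_type);
 */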

static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
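
/* Example: in a geometry shader, a GLSL input like
 *
 *    in vec4 color[];    // one element per input vertex
 *
 * arrives as an array over vertices, so is_per_vertex_inout() returns true
 * and the caller peels off the outer array level to get the per-vertex type.
 * Patch-qualified tessellation I/O is explicitly excluded above.
 */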

static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * inputs/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
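
/* Worked example for the rules quoted above: given a block whose base
 * Location is 4 and whose member 2 carries an explicit Location of 9,
 *
 *    member 0: no Location  -> assigned 4
 *    member 1: no Location  -> assigned 5   (one slot after member 0)
 *    member 2: Location 9   -> keeps 9
 *    member 3: no Location  -> assigned 10
 *
 * assuming each member occupies a single attribute slot.
 */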

static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_image:
      vtn_fail("Cannot create a variable with the Image storage class");
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBufferEXT storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* We need to tweak the NIR type here because at vtn_handle_type we
          * don't have access to the storage class, which is what tells us
          * that this is an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      var->var->type = var->type->type;
      var->var->interface_type = var->type->type;

      var->var->data.mode = nir_mode;
      var->var->data.location = -1;

      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         /* Workgroup variables don't have any explicit layout but some
          * layouts may have leaked through due to type deduplication in the
          * SPIR-V.
          */
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_mem_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_image:
   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
       var->mode == vtn_variable_mode_ssbo) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
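
/* For reference, the SPIR-V form driving vtn_create_variable is
 *
 *    %var = OpVariable %ptr_type StorageClass [%initializer]
 *
 * handled under SpvOpVariable in vtn_handle_variables below: w[1] is the
 * pointer type, w[2] the result id, w[3] the storage class, and the
 * optional w[4] a constant initializer.
 */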

static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which
       * have mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}

static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
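
/* Examples of nir_shrink_zero_pad_vec behavior:
 *
 *    (a, b)       -> num_components 4 -> (a, b, 0, 0)   (zero-pad)
 *    (a, b, c, d) -> num_components 2 -> (a, b)         (shrink)
 *    (a, b)       -> num_components 2 -> (a, b)         (returned as-is)
 */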

static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
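
/* Worked example: casting a single 32-bit value to a 64-bit scalar.  Since
 * 32 < 64, the source is first padded to align(1, 64/32) = 2 components,
 * i.e. (x, 0); nir_bitcast_vector then reinterprets that as one 64-bit
 * value, and the final shrink is a no-op.  Going the other way (64 -> 32)
 * skips the padding and simply shrinks after the bitcast, dropping the
 * high half; hence "sloppy".
 */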

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];

      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = vtn_constant_int(b, w[i]);
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->image);
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_pointer *ptr =
            vtn_pointer_dereference(b, base_val->pointer, chain);
         ptr->ptr_type = ptr_type;
         vtn_push_value_pointer(b, w[2], ptr);
      }
      break;
   }
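
   /* Access-chain example: for SPIR-V such as
    *
    *    %p = OpAccessChain %ptr_f32 %ssbo_var %c0 %i
    *
    * the loop above records one chain link per index: %c0 (a constant)
    * becomes a vtn_access_mode_literal link and %i (a dynamic value)
    * becomes a vtn_access_mode_id link, before the whole chain is resolved
    * by vtn_pointer_dereference.
    */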

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value_pointer(b, w[2], src);
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to workaround the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            dest->var->copy_prop_sampler =
               vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
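
   /* Numeric example of the formula above: with a buffer size of 256 bytes,
    * a member offset of 16 and an array stride of 4, the runtime array
    * length is max(256 - 16, 0) / 4 = 60 elements.
    */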

   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
      break;
   }

   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}