/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
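
/* Returns true if accesses through this pointer are lowered to explicit
 * index+offset arithmetic instead of NIR derefs.
 */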
bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ((ptr->mode == vtn_variable_mode_ubo ||
            ptr->mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
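
/* Returns true if the pointer refers to a resource living in memory external
 * to the shader: UBOs, SSBOs, push constants, and (when lowered to offsets)
 * workgroup storage.
 */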
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
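
/* Converts one link of an access chain into an SSA offset: a literal link
 * becomes an immediate, while an SSA link is converted to the requested bit
 * size and multiplied by the stride.
 */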
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);

      ssa = nir_imul_imm(&b->nb, ssa, stride);
      return ssa;
   }
}
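
/* Maps a vtn variable mode to the corresponding Vulkan descriptor type. */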
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
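
/* Emits a vulkan_resource_index intrinsic, which turns a (descriptor set,
 * binding, array index) triple into a block index usable by block load/store
 * intrinsics.
 */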
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
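
/* Emits a vulkan_resource_reindex intrinsic, which offsets an existing block
 * index further into an array of descriptors.
 */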
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
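
/* Loads the actual descriptor value for a given block index so that it can
 * later be turned into a deref via nir_build_deref_cast.
 */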
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    const struct glsl_type *desc_type, nir_ssa_def *desc_index)
{
   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   desc_load->num_components = glsl_get_vector_elements(desc_type);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(desc_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are used.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc =
         vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
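
/* Builds a vtn_pointer that points at the given variable itself, with an
 * empty access chain.
 */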
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}

/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}

nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
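
/* Recursively loads or stores a vtn_ssa_value through a NIR deref, splitting
 * arrays, matrices, and structs into per-element derefs until it reaches
 * vectors and scalars.
 */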
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref(&b->nb, deref);
      } else {
         nir_store_deref(&b->nb, deref, inout->def, ~0);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_type_is_struct(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   }
}

nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}
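
/* Emits a single offset-based load or store intrinsic for one vector or
 * scalar, handling write masks, push-constant base/range, alignment, and the
 * int-to-bool conversion used for booleans in external memory.
 */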
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
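
/* Recursively loads or stores a value in an interface block using explicit
 * index+offset arithmetic, decomposing matrices (including row-major ones)
 * and strided vectors into the individual intrinsics emitted by
 * _vtn_load_store_tail.
 */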
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
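
/* Loads a complete value from a UBO, SSBO, push-constant block, or
 * offset-lowered shared memory.
 */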
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
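
/* Recursively loads or stores a variable through derefs, walking arrays and
 * structs down to their vector and scalar leaves.
 */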
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref(&b->nb, deref);
            } else {
               nir_store_deref(&b->nb, deref, (*inout)->def, ~0);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref);
            } else {
               vtn_local_store(b, *inout, deref);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
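
/* Translates a SPIR-V builtin into the corresponding NIR location (varying
 * slot, fragment result, or system value) and, where necessary, adjusts the
 * variable mode.
 */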
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin: %u", builtin);
   }
}

static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         var_data->compact = true;
         break;
      case SpvBuiltInFragCoord:
         var_data->pixel_center_integer = b->pixel_center_integer;
         /* fallthrough */
      case SpvBuiltInSamplePosition:
         var_data->origin_upper_left = b->origin_upper_left;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->literals[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->literals[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->literals[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->literals[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationHlslSemanticGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      break;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      break;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      break;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->literals[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationHlslCounterBufferGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;

   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input = false;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);
         for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
            vtn_var->var->members[i].location = location;
            const struct glsl_type *member_type =
               glsl_get_struct_field(vtn_var->var->interface_type, i);
            location += glsl_count_attribute_slots(member_type,
                                                   is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            assert(member == -1);
            apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
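
/* Translates a SPIR-V storage class into a vtn variable mode and the
 * corresponding nir_variable_mode.  For the Uniform storage class, the
 * interface type decides between UBO, SSBO, and default-block uniform.
 */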
static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the variable
          * itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type)) {
         const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
         const unsigned num_components =
            glsl_get_vector_elements(ptr->ptr_type->type);

         /* In this case, we're looking for a block index and not an actual
          * deref.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to the
             * variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         /* A block index is just a 32-bit value but the pointer has some
          * other dimensionality.  Cram it in there and we'll unpack it later
          * in vtn_pointer_from_ssa.
          */
         const unsigned swiz[4] = { 0, };
         return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
                            swiz, num_components, false);
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
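
/* The inverse of vtn_pointer_to_ssa: reconstructs a vtn_pointer from an SSA
 * value, either by splitting out the block index and offset or by casting
 * the value back to a deref.
 */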
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_type *interface_type = ptr_type->deref;
   while (interface_type->base_type == vtn_base_type_array)
      interface_type = interface_type->array_element;

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         interface_type, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         assert(ssa->bit_size == 32 && ssa->num_components == 1);
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           glsl_get_bare_type(deref_type), 0);
      } else if (vtn_type_contains_block(b, ptr->type)) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  We squashed it into a
          * random vector type before so just pick off the first channel and
          * cast it back to 32 bits.
          */
         ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
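
/* Creates a vtn_variable (and, for most modes, the backing nir_variable)
 * for an OpVariable, updating the shader's resource counts as it goes.
 */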
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;
   default:
      /* No tallying is needed */
      break;
   }
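   /* For reference (illustrative SPIR-V, not taken from a real shader), the
    * well-formed pattern the StorageBuffer check above expects is:
    *
    *    OpDecorate %ssbo_struct Block
    *    %ptr = OpTypePointer StorageBuffer %ssbo_struct
    *    %var = OpVariable %ptr StorageBuffer
    */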
   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* We need to tweak the NIR type here because, by the time we get to
          * vtn_handle_type, we don't have access to the storage class, which
          * is what tells us this is really an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = glsl_get_bare_type(var->type->type);
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;
   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         /* Workgroup variables don't have any explicit layout but some
          * layouts may have leaked through due to type deduplication in the
          * SPIR-V.
          */
         var->var->type = glsl_get_bare_type(var->type->type);
         var->var->data.mode = nir_var_mem_shared;
      }
      break;
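   /* Note: in the lower_workgroup_access_to_offsets path above, no
    * nir_variable is created at all; shared_location starts out as -1 and is
    * presumably assigned a byte offset into the workgroup block later, when
    * workgroup access is lowered to offsets.
    */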
   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *interface_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         interface_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.
       */
      var->var->type = glsl_get_bare_type(var->type->type);
      var->var->interface_type = interface_type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      if (glsl_type_is_struct(interface_type->type)) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(interface_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the interface type.
       */
      vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
      /* These don't need actual variables. */
      break;
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }
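   /* For reference (illustrative SPIR-V, not from a real shader), an
    * OpVariable with an initializer looks like:
    *
    *    %f32  = OpTypeFloat 32
    *    %pf32 = OpTypePointer Private %f32
    *    %zero = OpConstant %f32 0
    *    %v    = OpVariable %pf32 Private %zero
    *
    * where w[1] is the pointer type, w[2] the result id, w[3] the storage
    * class, and the optional w[4] the initializer constant.
    */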
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            switch (glsl_get_bit_size(link_val->type->type)) {
            case 8:
               chain->link[idx].id = link_val->constant->values[0].i8[0];
               break;
            case 16:
               chain->link[idx].id = link_val->constant->values[0].i16[0];
               break;
            case 32:
               chain->link[idx].id = link_val->constant->values[0].i32[0];
               break;
            case 64:
               chain->link[idx].id = link_val->constant->values[0].i64[0];
               break;
            default:
               vtn_fail("Invalid bit size");
            }
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }
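      /* Illustrative example: for a GLSL access like "block.data[i]", the
       * chain built above ends up with two links: a literal link (mode
       * vtn_access_mode_literal, id 0) selecting the "data" member, and an
       * id link (mode vtn_access_mode_id) carrying the SPIR-V id of "i",
       * since "i" is not a constant.
       */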
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
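   /* Note on the image/sampler path above: NIR has no SSA values for images
    * or samplers, so an OpLoad of one simply forwards the source pointer as
    * the loaded value instead of emitting an actual load.
    */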
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to work around the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            dest->var->copy_prop_sampler =
               vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
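   /* Worked example for the computation above (illustrative numbers only):
    * with a 256-byte buffer, a runtime array member at byte offset 16, and a
    * 4-byte array stride, array_length = max(256 - 16, 0) / 4 = 60 elements.
    * Clamping to 0 keeps a buffer smaller than the member offset from
    * producing a negative length.
    */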
   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}