/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ((ptr->mode == vtn_variable_mode_ubo ||
            ptr->mode == vtn_variable_mode_ssbo) &&
           b->options->lower_ubo_ssbo_access_to_offsets) ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
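
/* Illustrative summary (added comment, not from the original source): a
 * pointer in one of the modes above is tracked as a (block_index, byte
 * offset) pair and lowered by the SSA-offset path below; every other mode
 * is tracked as a NIR deref chain instead.
 */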
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_phys_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride, unsigned bit_size)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_intN_t(&b->nb, link.id * stride, bit_size);
   } else {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != bit_size)
         ssa = nir_i2i(&b->nb, ssa, bit_size);
      return nir_imul_imm(&b->nb, ssa, stride);
   }
}
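
/* Illustrative example (added comment, not from the original source): a
 * literal link with link.id == 3 and stride == 16 folds to the immediate 48,
 * while an SSA link emits an i2i conversion (only if the bit sizes differ)
 * followed by imul_imm with the stride.
 */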
static VkDescriptorType
vk_desc_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
   case vtn_variable_mode_ssbo:
      return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static const struct glsl_type *
vtn_ptr_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   switch (mode) {
   case vtn_variable_mode_ubo:
      return b->options->ubo_ptr_type;
   case vtn_variable_mode_ssbo:
      return b->options->ssbo_ptr_type;
   default:
      vtn_fail("Invalid mode for vulkan_resource_index");
   }
}
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));

   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : vtn_ptr_type_for_mode(b, var->mode);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
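
/* Added note (not from the original source): the emitted
 * vulkan_resource_index intrinsic carries the descriptor set, binding, and
 * descriptor type as constant indices and takes only the descriptor array
 * index as an SSA source; the driver back-end later turns it into an actual
 * descriptor reference.
 */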
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
                     nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);
   nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));

   const struct glsl_type *index_type =
      b->options->lower_ubo_ssbo_access_to_offsets ?
      glsl_uint_type() : vtn_ptr_type_for_mode(b, mode);

   instr->num_components = glsl_get_vector_elements(index_type);
   nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
                     glsl_get_bit_size(index_type), NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
                    nir_ssa_def *desc_index)
{
   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_load_vulkan_descriptor);
   desc_load->src[0] = nir_src_for_ssa(desc_index);
   nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));

   const struct glsl_type *ptr_type = vtn_ptr_type_for_mode(b, mode);

   desc_load->num_components = glsl_get_vector_elements(ptr_type);
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     desc_load->num_components,
                     glsl_get_bit_size(ptr_type), NULL);
   nir_builder_instr_insert(&b->nb, &desc_load->instr);

   return &desc_load->dest.ssa;
}
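
/* Added note (not from the original source): the usual lowering sequence
 * for a buffer access is vulkan_resource_index (set/binding/array index),
 * optionally vulkan_resource_reindex, then load_vulkan_descriptor, whose
 * result is cast to a NIR deref in the function below.
 */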
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;
   unsigned idx = 0;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else if (vtn_pointer_is_external_block(b, base)) {
      nir_ssa_def *block_index = base->block_index;

      /* We're dereferencing an external block pointer.  Correctness of this
       * operation relies on one particular line in the SPIR-V spec, section
       * entitled "Validation Rules for Shader Capabilities":
       *
       *    "Block and BufferBlock decorations cannot decorate a structure
       *    type that is nested at any level inside another structure type
       *    decorated with Block or BufferBlock."
       *
       * This means that we can detect the point where we cross over from
       * descriptor indexing to buffer indexing by looking for the block
       * decorated struct type.  Anything before the block decorated struct
       * type is a descriptor indexing operation and anything after the block
       * decorated struct is a buffer offset operation.
       */

      /* Figure out the descriptor array index if any
       *
       * Some of the Vulkan CTS tests with hand-rolled SPIR-V have been known
       * to forget the Block or BufferBlock decoration from time to time.
       * It's more robust if we check for both !block_index and for the type
       * to contain a block.  This way there's a decent chance that arrays of
       * UBOs/SSBOs will work correctly even if variable pointers are
       * messed up.
       */
      nir_ssa_def *desc_arr_idx = NULL;
      if (!block_index || vtn_type_contains_block(b, type)) {
         /* If our type contains a block, then we're still outside the block
          * and we need to process enough levels of dereferences to get inside
          * of it.
          */
         if (deref_chain->ptr_as_array) {
            unsigned aoa_size = glsl_get_aoa_size(type->type);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                                  MAX2(aoa_size, 1), 32);
            idx++;
         }

         for (; idx < deref_chain->length; idx++) {
            if (type->base_type != vtn_base_type_array) {
               vtn_assert(type->base_type == vtn_base_type_struct);
               break;
            }

            unsigned aoa_size = glsl_get_aoa_size(type->array_element->type);
            nir_ssa_def *arr_offset =
               vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                      MAX2(aoa_size, 1), 32);
            if (desc_arr_idx)
               desc_arr_idx = nir_iadd(&b->nb, desc_arr_idx, arr_offset);
            else
               desc_arr_idx = arr_offset;

            type = type->array_element;
            access |= type->access;
         }
      }

      if (!block_index) {
         vtn_assert(base->var && base->type);
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (desc_arr_idx) {
         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, desc_arr_idx);
      }

      if (idx == deref_chain->length) {
         /* The entire deref was consumed in finding the block index.  Return
          * a pointer which just has a block index and a later access chain
          * will dereference deeper.
          */
         struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
         ptr->mode = base->mode;
         ptr->type = type;
         ptr->block_index = block_index;
         ptr->access = access;
         return ptr;
      }

      /* If we got here, there's more access chain to handle and we have the
       * final block index.  Insert a descriptor load and cast to a deref to
       * start the deref chain.
       */
      nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);

      assert(base->mode == vtn_variable_mode_ssbo ||
             base->mode == vtn_variable_mode_ubo);
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;

      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
                                  base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
      if (base->ptr_type && base->ptr_type->type) {
         tail->dest.ssa.num_components =
            glsl_get_vector_elements(base->ptr_type->type);
         tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
      }
   }

   if (idx == 0 && deref_chain->ptr_as_array) {
      /* We start with a deref cast to get the stride.  Hopefully, we'll be
       * able to delete that cast eventually.
       */
      tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
                                  tail->type, base->ptr_type->stride);

      nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
                                                  tail->dest.ssa.bit_size);
      tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      if (glsl_type_is_struct_or_ifc(type->type)) {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned field = deref_chain->link[idx].id;
         tail = nir_build_deref_struct(&b->nb, tail, field);
         type = type->members[field];
      } else {
         nir_ssa_def *arr_index =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
                                   tail->dest.ssa.bit_size);
         tail = nir_build_deref_array(&b->nb, tail, arr_index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
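
/* Illustrative example (added comment, not from the original source): for an
 * OpAccessChain like buf[i].member into an array of SSBOs, the code above
 * spends the leading links on the descriptor array index, emits
 * vulkan_resource_index/load_vulkan_descriptor, and then builds
 * deref_struct/deref_array instructions for the links inside the block.
 */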
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
         idx++;

         block_index = vtn_resource_reindex(b, base->mode,
                                            block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride, offset->bit_size);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                   type->stride, offset->bit_size);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_INTERFACE:
      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
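
/* Worked example (added comment, not from the original source): for a UBO
 * access chain block.member1[2] where member1 sits at byte offset 16 and the
 * array stride is 8, the loop above computes offset = 0 + 16 + 2 * 8 = 32
 * and leaves the block index untouched.
 */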
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}
/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type),
                             glsl_get_explicit_stride(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
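
/* Illustrative example (added comment, not from the original source):
 * repair_atomic_type(uint[3][2]) recurses through both array levels and
 * returns atomic_uint[3][2], preserving the original array lengths and
 * explicit strides.
 */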
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (b->wa_glslang_179) {
      /* Do on-the-fly copy propagation for samplers. */
      if (ptr->var && ptr->var->copy_prop_sampler)
         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
   }

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout,
                      enum gl_access_qualifier access)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
      } else {
         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array_imm(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   } else {
      vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i], access);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
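
/* Added note (not from the original source): this matters for chains like
 * OpAccessChain %vec4_var %i, which become an array deref into a vector in
 * NIR; the load/store helpers below operate on the whole vector and then
 * extract or insert the single addressed component.
 */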
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val, access);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest, enum gl_access_qualifier access)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val, access);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val, access);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src, access);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
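
/* Worked example (added comment, not from the original source): for a block
 * { vec3 a; float b; } with member offsets 0 and 12, the struct case returns
 * MAX2(0 + 12, 12 + 4) == 16 bytes, i.e. the end of the furthest-reaching
 * member rather than a sum of member sizes.
 */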
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
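
/* Added note (not from the original source): booleans are read and written
 * as 32-bit integers here (data_bit_size == 32), and on load the value is
 * normalized back to a NIR boolean with nir_ine against zero at the end of
 * the function.
 */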
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
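
/* Added note (not from the original source): row-major matrices are handled
 * by transposing in registers: stores transpose the value first and write
 * row by row, while loads read into a transposed temporary and transpose
 * back once all rows have been fetched.
 */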
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         if (vtn_pointer_is_external_block(b, ptr)) {
            /* If it's external, we call nir_load/store_deref directly.  The
             * vtn_local_load/store helpers are too clever and do magic to
             * avoid array derefs of vectors.  That magic is both less
             * efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
             * deref.
             */
            if (load) {
               *inout = vtn_create_ssa_value(b, ptr->type->type);
               (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
                                                          ptr->type->access | access);
            } else {
               nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
                                           ptr->type->access | access);
            }
         } else {
            if (load) {
               *inout = vtn_local_load(b, deref, ptr->type->access | access);
            } else {
               vtn_local_store(b, *inout, deref, ptr->type->access | access);
            }
         }
         return;
      }
      /* Fall through */

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                  &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_uses_ssa_offset(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, src->access, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_uses_ssa_offset(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, dest->access, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalLinearId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin: %u", builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
      case SpvBuiltInClipDistance:
      case SpvBuiltInCullDistance:
         var_data->compact = true;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->literals[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->literals[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->literals[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->literals[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationHlslSemanticGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;

   case SpvDecorationRestrictPointerEXT:
   case SpvDecorationAliasedPointerEXT:
      /* TODO: We should actually plumb alias information through NIR. */
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->literals[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationHlslCounterBufferGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);

         if (member == -1)
            vtn_var->base_location = location;
         else
            vtn_var->var->members[member].location = location;
      }

      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            /* We call this function on types as well as variables and not all
             * struct types get split so we can end up having stray member
             * decorations; just ignore them.
             */
            if (member == -1)
               apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
static void
ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_ptr)
{
   struct vtn_pointer *ptr = void_ptr;

   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      ptr->access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = nir_var_mem_ubo;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = nir_var_mem_ssbo;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = nir_var_mem_ssbo;
      break;
   case SpvStorageClassPhysicalStorageBufferEXT:
      mode = vtn_variable_mode_phys_ssbo;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_private;
      nir_mode = nir_var_shader_temp;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_function;
      nir_mode = nir_var_function_temp;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_mem_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
      mode = vtn_variable_mode_cross_workgroup;
      nir_mode = nir_var_mem_global;
      break;
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the variable
          * itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      if (vtn_pointer_is_external_block(b, ptr) &&
          vtn_type_contains_block(b, ptr->type) &&
          ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* In this case, we're looking for a block index and not an actual
          * deref.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be a SSBO binding variable using the
          * PhysicalStorageBufferEXT storage class.  This assumption appears
          * to be correct according to the Vulkan spec because, in the table
          * "Shader Resource and Storage Class Correspondence," only the
          * Uniform storage class with BufferBlock or the StorageBuffer
          * storage class with Block can be used.
          */
         if (!ptr->block_index) {
            /* If we don't have a block_index then we must be a pointer to the
             * variable itself.
             */
            vtn_assert(!ptr->deref);

            struct vtn_access_chain chain = {
               .length = 0,
            };
            ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
         }

         return ptr->block_index;
      } else {
         return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
      }
   }
}
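
/* Added note (not from the original source): the SSA form of a pointer is
 * therefore mode-dependent: a vec2 of (block_index, offset) for UBO/SSBO
 * offset pointers, a single offset scalar for workgroup memory, a block
 * index for pointers still outside a block, and the deref's SSA destination
 * otherwise.
 */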
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_type *interface_type = ptr_type->deref;
   while (interface_type->base_type == vtn_base_type_array)
      interface_type = interface_type->array_element;

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         interface_type, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (b->wa_glslang_179) {
      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
       * need to whack the mode because it creates a function parameter with
       * the Function storage class even though it's a pointer to a sampler.
       * If we don't do this, then NIR won't get rid of the deref_cast for us.
       */
      if (ptr->mode == vtn_variable_mode_function &&
          (ptr->type->base_type == vtn_base_type_sampler ||
           ptr->type->base_type == vtn_base_type_sampled_image)) {
         ptr->mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
   }

   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      if (ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo) {
         vtn_assert(ssa->num_components == 2);
         ptr->block_index = nir_channel(&b->nb, ssa, 0);
         ptr->offset = nir_channel(&b->nb, ssa, 1);
      } else {
         vtn_assert(ssa->num_components == 1);
         ptr->block_index = NULL;
         ptr->offset = ssa;
      }
   } else {
      const struct glsl_type *deref_type = ptr_type->deref->type;
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, 0);
      } else if (vtn_type_contains_block(b, ptr->type) &&
                 ptr->mode != vtn_variable_mode_phys_ssbo) {
         /* This is a pointer to somewhere in an array of blocks, not a
          * pointer to somewhere inside the block.  Set the block index
          * instead of making a cast.
          */
         ptr->block_index = ssa;
      } else {
         /* This is a pointer to something internal or a pointer inside a
          * block.  It's just a regular cast.
          *
          * For PhysicalStorageBufferEXT pointers, we don't have a block index
          * at all because we get the pointer directly from the client.  This
          * assumes that there will never be a SSBO binding variable using the
          * PhysicalStorageBufferEXT storage class.  This assumption appears
          * to be correct according to the Vulkan spec: in the table "Shader
          * Resource and Storage Class Correspondence," only the Uniform
          * storage class with BufferBlock or the StorageBuffer storage class
          * with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           ptr_type->deref->type,
                                           ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      }
   }

   return ptr;
}
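
/* Returns true if this input/output variable is arrayed per-vertex, i.e.
 * when its outermost array dimension indexes vertices rather than being
 * part of the variable's own type (tessellation and geometry I/O).
 */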
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
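
/* Walks the members of a split I/O struct and assigns a location to every
 * member that lacks an explicit Location decoration, continuing from the
 * immediately preceding member as the Vulkan spec requires.
 */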
static void
assign_missing_member_locations(struct vtn_variable *var)
{
   unsigned length =
      glsl_get_length(glsl_without_array(var->type->type));
   int location = var->base_location;

   for (unsigned i = 0; i < length; i++) {
      /* From the Vulkan spec:
       *
       * “If the structure type is a Block but without a Location, then each
       *  of its members must have a Location decoration.”
       */
      if (var->type->block) {
         assert(var->base_location != -1 ||
                var->var->members[i].location != -1);
      }

      /* From the Vulkan spec:
       *
       * “Any member with its own Location decoration is assigned that
       *  location. Each remaining member is assigned the location after the
       *  immediately preceding member in declaration order.”
       */
      if (var->var->members[i].location != -1)
         location = var->var->members[i].location;
      else
         var->var->members[i].location = location;

      /* Below we use type instead of interface_type, because interface_type
       * is only available when it is a Block.  This code also supports
       * input/outputs that are just structs.
       */
      const struct glsl_type *member_type =
         glsl_get_struct_field(glsl_without_array(var->type->type), i);

      location +=
         glsl_count_attribute_slots(member_type,
                                    false /* is_gl_vertex_input */);
   }
}
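
/* Handles OpVariable: translates a SPIR-V variable declaration into a
 * vtn_variable and, for most storage classes, a backing nir_variable.
 * Inputs and outputs are split per-member; UBOs, SSBOs, push constants,
 * and CrossWorkgroup variables get no NIR variable at all.
 */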
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      /* There's no other way to get vtn_variable_mode_ubo */
      vtn_assert(without_array->block);
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      if (storage_class == SpvStorageClassStorageBuffer &&
          !without_array->block) {
         if (b->variable_pointers) {
            vtn_fail("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         } else {
            /* If variable pointers are not present, it's still malformed
             * SPIR-V but we can parse it and do the right thing anyway.
             * Since some of the 8-bit storage tests have bugs in this area,
             * just make it a warning for now.
             */
            vtn_warn("Variables in the StorageBuffer storage class must "
                     "have a struct type with the Block decoration");
         }
      }
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;

   case vtn_variable_mode_phys_ssbo:
      vtn_fail("Cannot create a variable with the "
               "PhysicalStorageBufferEXT storage class");
      break;

   default:
      /* No tallying is needed */
      break;
   }
   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;
   var->base_location = -1;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_function:
   case vtn_variable_mode_private:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      if (storage_class == SpvStorageClassAtomicCounter) {
         /* Need to tweak the NIR type here because at vtn_handle_type time
          * we don't have access to the storage class, which is what tells
          * us that this is an atomic uint.
          */
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         /* Private variables don't have any explicit layout but some layouts
          * may have leaked through due to type deduplication in the SPIR-V.
          */
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         /* Workgroup variables don't have any explicit layout but some
          * layouts may have leaked through due to type deduplication in the
          * SPIR-V.
          */
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_mem_shared;
      }
      break;
   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct_or_ifc(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *per_vertex_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         per_vertex_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      /* In Vulkan, shader I/O variables don't have any explicit layout but
       * some layouts may have leaked through due to type deduplication in
       * the SPIR-V.  We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      /* Figure out the interface block type. */
      struct vtn_type *iface_type = per_vertex_type;
      if (var->mode == vtn_variable_mode_output &&
          (b->shader->info.stage == MESA_SHADER_VERTEX ||
           b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
           b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
         /* For vertex data outputs, we can end up with arrays of blocks for
          * transform feedback where each array element corresponds to a
          * different XFB output buffer.
          */
         while (iface_type->base_type == vtn_base_type_array)
            iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
         var->var->interface_type = iface_type->type;

      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(per_vertex_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
            var->var->members[i].location = -1;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the per-vertex type.
       */
      vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }
   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
   case vtn_variable_mode_cross_workgroup:
      /* These don't need actual variables. */
      break;

   case vtn_variable_mode_phys_ssbo:
      unreachable("Should have been caught before");
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);
   vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);

   if ((var->mode == vtn_variable_mode_input ||
        var->mode == vtn_variable_mode_output) &&
       var->var->members) {
      assign_missing_member_locations(var);
   }

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_function) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
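
/* Checks that the source and destination types of a copy-like opcode match.
 * Types whose IDs differ but which are structurally compatible only warn,
 * to cope with old glslang output that re-emitted identical types.
 */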
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
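
/* Resizes a vector to exactly num_components, dropping trailing components
 * or zero-padding with immediate zeros as needed.
 */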
static nir_ssa_def *
nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
                        unsigned num_components)
{
   if (val->num_components == num_components)
      return val;

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (i < val->num_components)
         comps[i] = nir_channel(b, val, i);
      else
         comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
   }
   return nir_vec(b, comps, num_components);
}
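
/* Bit-casts a value to a (possibly differently sized) vector type by
 * zero-padding first so nothing is truncated, bit-casting to the new bit
 * size, and then shrinking back down to the requested component count.
 */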
static nir_ssa_def *
nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
                   const struct glsl_type *type)
{
   const unsigned num_components = glsl_get_vector_elements(type);
   const unsigned bit_size = glsl_get_bit_size(type);

   /* First, zero-pad to ensure that the value is big enough that when we
    * bit-cast it, we don't lose anything.
    */
   if (val->bit_size < bit_size) {
      const unsigned src_num_components_needed =
         vtn_align_u32(val->num_components, bit_size / val->bit_size);
      val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
   }

   val = nir_bitcast_vector(b, val, bit_size);

   return nir_shrink_zero_pad_vec(b, val, num_components);
}
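
/* Top-level handler for the variable- and pointer-related SPIR-V opcodes:
 * OpUndef, OpVariable, the access-chain family, OpCopyMemory, OpLoad,
 * OpStore, OpArrayLength, and the pointer/integer conversions.
 */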
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];

      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
                             opcode == SpvOpInBoundsPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            switch (glsl_get_bit_size(link_val->type->type)) {
            case 8:
               chain->link[idx].id = link_val->constant->values[0].i8[0];
               break;
            case 16:
               chain->link[idx].id = link_val->constant->values[0].i16[0];
               break;
            case 32:
               chain->link[idx].id = link_val->constant->values[0].i32[0];
               break;
            case 64:
               chain->link[idx].id = link_val->constant->values[0].i64[0];
               break;
            default:
               vtn_fail("Invalid bit size");
            }
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->image);
         vtn_foreach_decoration(b, val, ptr_decoration_cb,
                                val->sampled_image->sampler);
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
         vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
      }
      break;
   }
   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }
   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }
   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         if (b->wa_glslang_179) {
            vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                     "propagation to workaround the problem.");
            vtn_assert(dest->var->copy_prop_sampler == NULL);
            dest->var->copy_prop_sampler =
               vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         } else {
            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
         }
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }
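
   /* OpArrayLength is implemented by querying the size of the backing
    * buffer with a get_buffer_size intrinsic and then computing
    * max(buffer_size - member_offset, 0) / array_stride.
    */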
   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      const uint32_t field = w[4];

      vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
                  "OpArrayLength must take a pointer to a structure type");
      vtn_fail_if(field != ptr->type->length - 1 ||
                  ptr->type->members[field]->base_type != vtn_base_type_array,
                  "OpArrayLength must reference the last member of the "
                  "structure and that must be an array");

      const uint32_t offset = ptr->type->offsets[field];
      const uint32_t stride = ptr->type->members[field]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }
   case SpvOpConvertPtrToU: {
      struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertPtrToU can only be used to cast to a vector or "
                  "scalar type");

      /* The pointer will be converted to an SSA value automatically */
      nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;

      u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
      u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
      break;
   }
   case SpvOpConvertUToPtr: {
      struct vtn_value *ptr_val =
         vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);

      vtn_fail_if(ptr_val->type->type == NULL,
                  "OpConvertUToPtr can only be used on physical pointers");

      vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
                  u_val->type->base_type != vtn_base_type_scalar,
                  "OpConvertUToPtr can only be used to cast from a vector or "
                  "scalar type");

      nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
                                                ptr_val->type->type);
      ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}