/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
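
/* Example usage (hypothetical values): a three-link chain lives in a single
 * ralloc'd block owned by the builder, so it needs no explicit free:
 *
 *    struct vtn_access_chain *chain = vtn_access_chain_create(b, 3);
 *    chain->link[0].mode = vtn_access_mode_literal;
 *    chain->link[0].id = 2;
 *
 * Because one link is already built into the struct, the allocation above
 * only adds space for the two extra links.
 */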
static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   struct vtn_access_chain *chain;

   unsigned old_len = old ? old->length : 0;
   chain = vtn_access_chain_create(b, old_len + new_ids);

   for (unsigned i = 0; i < old_len; i++)
      chain->link[i] = old->link[i];

   return chain;
}
static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
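
/* Note: these two predicates split pointers into the two representations
 * used throughout this file.  "SSA offset" pointers (UBO, SSBO, push
 * constant, and workgroup when lowered to offsets) are carried around as an
 * explicit block_index/byte-offset pair, while everything else is carried
 * as a variable plus an access chain that eventually becomes NIR deref
 * instructions.
 */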
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_access_chain_pointer_dereference(struct vtn_builder *b,
                                     struct vtn_pointer *base,
                                     struct vtn_access_chain *deref_chain)
{
   struct vtn_access_chain *chain =
      vtn_access_chain_extend(b, base->chain, deref_chain->length);
   struct vtn_type *type = base->type;

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   unsigned start = base->chain ? base->chain->length : 0;
   for (unsigned i = 0; i < deref_chain->length; i++) {
      chain->link[start + i] = deref_chain->link[i];

      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         type = type->members[deref_chain->link[i].id];
      } else {
         type = type->array_element;
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->chain = chain;

   return ptr;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_u2u32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_u2u32(&b->nb, src0);
      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
   }
}
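
/* For instance (illustrative values), a literal link with id 2 and a stride
 * of 16 bytes folds straight into nir_imm_int(&b->nb, 32), while an
 * SSA-valued link is first widened to 32 bits and then multiplied by the
 * stride, unless the stride is 1, in which case the multiply is skipped.
 */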
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
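
/* Together, these two helpers model Vulkan descriptor addressing:
 * vtn_variable_resource_index produces a block index from a
 * (descriptor set, binding, array index) triple, and vtn_resource_reindex
 * adds a further offset to an existing index, e.g. for the
 * OpPtrAccessChain-on-a-block corner case handled below.
 */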
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
         offset = nir_iadd(&b->nb, offset, mem_offset);
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;

   return ptr;
}
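
/* Rough example of the loop above (hypothetical shader): for an access
 * chain b.m[3].x into an SSBO where member m sits at byte offset 16, has an
 * array stride of 12, and x sits at offset 8 within each element, the
 * emitted offset is roughly
 *
 *    offset = iadd(iadd(iadd(base, 16), imul(3, 12)), 8)
 *
 * with literal links folded to immediates by vtn_access_link_as_ssa().
 */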
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_access_chain_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;

   return pointer;
}
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   nir_deref_instr *deref_var =
      nir_deref_instr_create(b->nb.shader, nir_deref_type_var);
   nir_ssa_dest_init(&deref_var->instr, &deref_var->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &deref_var->instr);

   assert(ptr->var->var);
   deref_var->mode = ptr->var->var->data.mode;
   deref_var->type = ptr->var->var->type;
   deref_var->var = ptr->var->var;

   /* Raw variable access */
   if (!ptr->chain)
      return deref_var;

   struct vtn_access_chain *chain = ptr->chain;
   vtn_assert(chain);

   struct vtn_type *deref_type = ptr->var->type;
   nir_deref_instr *tail = deref_var;

   for (unsigned i = 0; i < chain->length; i++) {
      enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         deref_type = deref_type->array_element;

         nir_ssa_def *index;
         if (chain->link[i].mode == vtn_access_mode_literal) {
            index = nir_imm_int(&b->nb, chain->link[i].id);
         } else {
            vtn_assert(chain->link[i].mode == vtn_access_mode_id);
            index = vtn_ssa_value(b, chain->link[i].id)->def;
         }

         tail = nir_build_deref_array(&b->nb, tail, index);
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = chain->link[i].id;
         deref_type = deref_type->members[idx];
         tail = nir_build_deref_struct(&b->nb, tail, idx);
         break;
      }
      default:
         vtn_fail("Invalid type for deref");
      }
   }

   return tail;
}
nir_deref_var *
vtn_pointer_to_deref_var(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   return nir_deref_instr_to_deref(vtn_pointer_to_deref(b, ptr), b);
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref(&b->nb, deref);
      } else {
         nir_store_deref(&b->nb, deref, inout->def, ~0);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_type_is_struct(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
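
/* Example (hypothetical GLSL): for "v[i] = f" where v is a vec4, the access
 * chain ends in an array deref whose parent type is a vector, so
 * get_deref_tail() returns the vector deref and the callers below handle
 * the per-component extract/insert themselves.
 */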
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val);

   if (src_tail != src) {
      val->type = src->type;
      nir_const_value *const_index = nir_src_as_const_value(src->arr.index);
      if (const_index)
         val->def = vtn_vector_extract(b, val->def, const_index->u32[0]);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val);

      nir_const_value *const_index = nir_src_as_const_value(dest->arr.index);
      if (const_index)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      const_index->u32[0]);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}
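
/* Worked example (hypothetical std430 block): struct { vec3 a; float b; }
 * with offsets[] = {0, 12} yields MAX2(0 + 12, 12 + 4) = 16 bytes, i.e. the
 * end of the furthest-reaching member, not the sum of member sizes.
 */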
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load
       * the data.
       */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(type), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width));
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                                   nir_imm_int(&b->nb, i * type->stride));
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type));
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
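
/* For example, loading a row-major mat3 (column stride taken from the array
 * element type) issues one vec3 load per row into a transposed temporary
 * and then transposes the result back, whereas a column-major mat3 is
 * simply three vec3 loads at offset, offset + stride, and offset + 2 *
 * stride.
 */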
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexIndex:
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   default:
      vtn_fail("unsupported builtin");
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.write_only = true;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.read_only = true;
      break;
   case SpvDecorationRestrict:
      var_data->image.restrict_flag = true;
      break;
   case SpvDecorationVolatile:
      var_data->image._volatile = true;
      break;
   case SpvDecorationCoherent:
      var_data->image.coherent = true;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         var_data->compact = true;
         break;
      case SpvBuiltInFragCoord:
         var_data->pixel_center_integer = b->pixel_center_integer;
         /* fallthrough */
      case SpvBuiltInSamplePosition:
         var_data->origin_upper_left = b->origin_upper_left;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(val->pointer->chain == NULL);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);
         for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
            vtn_var->var->members[i].location = location;
            const struct glsl_type *member_type =
               glsl_get_struct_field(vtn_var->var->interface_type, i);
            location += glsl_count_attribute_slots(member_type,
                                                   is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            assert(member == -1);
            apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   case SpvStorageClassAtomicCounter:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* This pointer needs to have a pointer type with actual storage */
   vtn_assert(ptr->ptr_type);
   vtn_assert(ptr->ptr_type->type);

   if (!ptr->offset) {
      /* If we don't have an offset then we must be a pointer to the variable
       * itself.
       */
      vtn_assert(!ptr->offset && !ptr->block_index);

      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }

   vtn_assert(ptr->offset);
   if (ptr->block_index) {
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
   } else {
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
      return ptr->offset;
   }
}
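
/* In other words (illustrative): a UBO/SSBO pointer becomes a
 * vec2(block_index, byte_offset) SSA value, while a workgroup pointer is
 * just its scalar byte offset.  vtn_pointer_from_ssa() below is the inverse
 * of this packing.
 */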
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->base_type != vtn_base_type_pointer);
   /* This pointer type needs to have actual storage */
   vtn_assert(ptr_type->type);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         ptr_type, NULL);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (ssa->num_components > 1) {
      vtn_assert(ssa->num_components == 2);
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      ptr->block_index = nir_channel(&b->nb, ssa, 0);
      ptr->offset = nir_channel(&b->nb, ssa, 1);
   } else {
      vtn_assert(ssa->num_components == 1);
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup ||
                 ptr->mode == vtn_variable_mode_push_constant);
      ptr->block_index = NULL;
      ptr->offset = ssa;
   }

   return ptr;
}
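
/* Example round-trip (hypothetical): given an SSBO pointer p,
 *
 *    nir_ssa_def *packed = vtn_pointer_to_ssa(b, p);
 *    struct vtn_pointer *q = vtn_pointer_from_ssa(b, packed, p->ptr_type);
 *
 * q carries the same block index and offset as p.  This is what lets
 * pointers flow through ordinary SSA operations such as OpSelect and OpPhi.
 */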
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;
   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_local:
   case vtn_variable_mode_global:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *interface_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         interface_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = var->type->type;
      var->var->interface_type = interface_type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      if (glsl_type_is_struct(interface_type->type)) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(interface_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the interface type.
       */
      vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_param:
      vtn_fail("Not created through OpVariable");

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
      /* These don't need actual variables. */
      break;
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to work around the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}