/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}

static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   struct vtn_access_chain *chain;

   unsigned old_len = old ? old->length : 0;
   chain = vtn_access_chain_create(b, old_len + new_ids);

   for (unsigned i = 0; i < old_len; i++)
      chain->link[i] = old->link[i];

   return chain;
}

static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_access_chain_pointer_dereference(struct vtn_builder *b,
                                     struct vtn_pointer *base,
                                     struct vtn_access_chain *deref_chain)
{
   struct vtn_access_chain *chain =
      vtn_access_chain_extend(b, base->chain, deref_chain->length);
   struct vtn_type *type = base->type;

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   unsigned start = base->chain ? base->chain->length : 0;
   for (unsigned i = 0; i < deref_chain->length; i++) {
      chain->link[start + i] = deref_chain->link[i];

      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         type = type->members[deref_chain->link[i].id];
      } else {
         type = type->array_element;
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->chain = chain;

   return ptr;
}

static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_u2u32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_u2u32(&b->nb, src0);
      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
   }
}

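/* Worked example (illustrative): with a hypothetical array stride of 16
 * bytes, the three cases above produce:
 *
 *    literal link, id = 3, stride = 16  ->  nir_imm_int(&b->nb, 48)
 *    id link (SSA def v),  stride = 1   ->  nir_u2u32(&b->nb, v)
 *    id link (SSA def v),  stride = 16  ->  nir_imul(&b->nb, nir_u2u32(v),
 *                                                    nir_imm_int(&b->nb, 16))
 *
 * i.e. the link always becomes a 32-bit element index scaled by the stride,
 * folding to an immediate when the index is a literal.
 */
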
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off with at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
         offset = nir_iadd(&b->nb, offset, mem_offset);
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;

   return ptr;
}

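/* Worked example (illustrative, hypothetical layout): consider an SSBO
 *
 *    struct S { float a; vec4 v[8]; } ssbo;   // offsets: a = 0, v = 16
 *                                             // v has a 16-byte stride
 *
 * and the access chain { literal 1, id i } for ssbo.v[i].  The loop above
 * computes
 *
 *    offset = 0                                  // start of the buffer
 *    offset = iadd(offset, imm(16))              // struct member 1
 *    offset = iadd(offset, imul(u2u32(i), 16))   // array link, stride 16
 *
 * and returns a pointer carrying that offset plus the block index obtained
 * from vtn_variable_resource_index().
 */
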
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_access_chain_pointer_dereference(b, base, deref_chain);
   }
}

/* Crawls a chain of array derefs and rewrites the types so that the
 * lengths stay the same but the terminal type is the one given by
 * tail_type.  This is useful for split structures.
 */
static void
rewrite_deref_types(struct vtn_builder *b, nir_deref *deref,
                    const struct glsl_type *type)
{
   deref->type = type;
   if (deref->child) {
      vtn_assert(deref->child->deref_type == nir_deref_type_array);
      vtn_assert(glsl_type_is_array(deref->type));
      rewrite_deref_types(b, deref->child, glsl_get_array_element(type));
   }
}

struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;

   return pointer;
}

nir_deref_var *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   nir_deref_var *deref_var;
   if (ptr->var->var) {
      deref_var = nir_deref_var_create(b, ptr->var->var);
      /* Raw variable access */
      if (!ptr->chain)
         return deref_var;
   } else {
      vtn_assert(ptr->var->members);
      /* Create the deref_var manually.  It will get filled out later. */
      deref_var = rzalloc(b, nir_deref_var);
      deref_var->deref.deref_type = nir_deref_type_var;
   }

   struct vtn_access_chain *chain = ptr->chain;
   vtn_assert(chain);

   struct vtn_type *deref_type = ptr->var->type;
   nir_deref *tail = &deref_var->deref;
   nir_variable **members = ptr->var->members;

   for (unsigned i = 0; i < chain->length; i++) {
      enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         deref_type = deref_type->array_element;

         nir_deref_array *deref_arr = nir_deref_array_create(b);
         deref_arr->deref.type = deref_type->type;

         if (chain->link[i].mode == vtn_access_mode_literal) {
            deref_arr->deref_array_type = nir_deref_array_type_direct;
            deref_arr->base_offset = chain->link[i].id;
         } else {
            vtn_assert(chain->link[i].mode == vtn_access_mode_id);
            deref_arr->deref_array_type = nir_deref_array_type_indirect;
            deref_arr->base_offset = 0;
            deref_arr->indirect =
               nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
         }
         tail->child = &deref_arr->deref;
         tail = tail->child;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = chain->link[i].id;
         deref_type = deref_type->members[idx];
         if (members) {
            /* This is a pre-split structure. */
            deref_var->var = members[idx];
            rewrite_deref_types(b, &deref_var->deref, members[idx]->type);
            vtn_assert(tail->type == deref_type->type);
            members = NULL;
         } else {
            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
            deref_struct->deref.type = deref_type->type;
            tail->child = &deref_struct->deref;
            tail = tail->child;
         }
         break;
      }
      default:
         vtn_fail("Invalid type for deref");
      }
   }

   vtn_assert(members == NULL);
   return deref_var;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
                      nir_deref *tail, struct vtn_ssa_value *inout)
{
   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = tail->child;

   if (glsl_type_is_vector_or_scalar(tail->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      tail->child = NULL;

      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
                                   nir_intrinsic_store_var;

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
      intrin->variables[0] = nir_deref_var_clone(deref, intrin);
      intrin->num_components = glsl_get_vector_elements(tail->type);

      if (load) {
         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                           intrin->num_components,
                           glsl_get_bit_size(tail->type),
                           NULL);
         inout->def = &intrin->dest.ssa;
      } else {
         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
         intrin->src[0] = nir_src_for_ssa(inout->def);
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(tail->type)) {
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_array *deref_arr = nir_deref_array_create(b);
      deref_arr->deref_array_type = nir_deref_array_type_direct;
      deref_arr->deref.type = glsl_get_array_element(tail->type);
      tail->child = &deref_arr->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_arr->base_offset = i;
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
      tail->child = &deref_struct->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_struct->index = i;
         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   }

   tail->child = old_child;
}

nir_deref_var *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref *
get_deref_tail(nir_deref_var *deref)
{
   nir_deref *cur = &deref->deref;
   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
      cur = cur->child;

   return cur;
}

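/* Example (illustrative): for GLSL like
 *
 *    vec4 color;
 *    float f = color[i];
 *
 * SPIR-V may emit OpAccessChain with a final per-component index, so the
 * NIR deref chain is  var -> (vec4)  with a child array deref selecting
 * component i.  get_deref_tail() stops at the vec4, letting the callers
 * below load/store the whole vector and then extract or insert the single
 * component.
 */
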
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
{
   nir_deref *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src, src_tail, val);

   if (src_tail->child) {
      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
      vtn_assert(vec_deref->deref.child == NULL);
      val->type = vec_deref->deref.type;
      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def,
                                               vec_deref->indirect.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_var *dest)
{
   nir_deref *dest_tail = get_deref_tail(dest);

   if (dest_tail->child) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest, dest_tail, val);
      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
      vtn_assert(deref->deref.child == NULL);
      if (deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      deref->base_offset);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              deref->indirect.ssa);
      _vtn_local_load_store(b, false, dest, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest, dest_tail, src);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out, unsigned *end_idx_out)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      if (!ptr->offset) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }
      *index_out = ptr->block_index;
      return ptr->offset;
   }

   vtn_assert(ptr->mode == vtn_variable_mode_push_constant);
   *index_out = NULL;

   unsigned idx = 0;
   struct vtn_type *type = ptr->var->type;
   nir_ssa_def *offset = nir_imm_int(&b->nb, 0);

   for (; idx < ptr->chain->length; idx++) {
      enum glsl_base_type base_type = glsl_get_base_type(type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY:
         offset = nir_iadd(&b->nb, offset,
                           vtn_access_link_as_ssa(b, ptr->chain->link[idx],
                                                  type->stride));

         type = type->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         vtn_assert(ptr->chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = ptr->chain->link[idx].id;
         offset = nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, type->offsets[member]));
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   vtn_assert(type == ptr->type);
   if (end_idx_out)
      *end_idx_out = idx;

   return offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}

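/* Worked example (illustrative, assuming a hypothetical std140-style
 * layout):
 *
 *    struct { vec3 a; float b; mat4 m; }
 *    // offsets: a = 0, b = 12, m = 16; m has a 16-byte column stride
 *
 *    a: 3 elements * 4 bytes         -> ends at 12
 *    b: 1 element  * 4 bytes         -> ends at 16
 *    m: 4 columns  * 16-byte stride  -> ends at 16 + 64 = 80
 *
 * so vtn_type_block_size() returns the MAX2 over the field ends, i.e. 80.
 */
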
static void
vtn_access_chain_get_offset_size(struct vtn_builder *b,
                                 struct vtn_access_chain *chain,
                                 struct vtn_type *type,
                                 unsigned *access_offset,
                                 unsigned *access_size)
{
   *access_offset = 0;

   for (unsigned i = 0; i < chain->length; i++) {
      if (chain->link[i].mode != vtn_access_mode_literal)
         break;

      if (glsl_type_is_struct(type->type)) {
         *access_offset += type->offsets[chain->link[i].id];
         type = type->members[chain->link[i].id];
      } else {
         *access_offset += type->stride * chain->link[i].id;
         type = type->array_element;
      }
   }

   *access_size = vtn_type_block_size(b, type);
}

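/* Worked example (illustrative, hypothetical layout): for push constants
 *
 *    struct { vec4 a; float b[4]; }   // offsets: a = 0, b = 16; b stride 4
 *
 * and the all-literal chain { 1, 2 } (i.e. b[2]), the loop accumulates
 * *access_offset = 16 + 2 * 4 = 24 and *access_size becomes the block size
 * of float, i.e. 4.  These become the base/range of the
 * nir_intrinsic_load_push_constant emitted below.
 */
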
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(type), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_access_chain *chain, unsigned chain_idx,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (chain && chain_idx >= chain->length)
      chain = NULL;

   if (load && chain == NULL && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width));
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type));
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}

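/* Example (illustrative): loading a row-major mat4 from a UBO with a
 * hypothetical 16-byte row stride takes the matrix branch above with
 *
 *    num_ops = 4 (rows), vec_width = 4, col_stride = 16
 *
 * so four vec4 loads are emitted at offset + 0, 16, 32 and 48, and the
 * result is fed through vtn_ssa_transpose() at the end to recover
 * column-major vectors.
 */
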
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      vtn_access_chain_get_offset_size(b, src->chain, src->var->type,
                                       &access_offset, &access_size);
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, src, &index, &chain_idx);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->chain, chain_idx, src->type, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, dst, &index, &chain_idx);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->chain, chain_idx, dst->type, &src);
}

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexIndex:
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      /* This should already be handled */
      vtn_fail("unsupported builtin");
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      *location = SYSTEM_VALUE_BASE_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin");
   }
}

static void
apply_var_decoration(struct vtn_builder *b, nir_variable *nir_var,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      nir_var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      nir_var->data.interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      nir_var->data.centroid = true;
      break;
   case SpvDecorationSample:
      nir_var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      nir_var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      vtn_assert(nir_var->constant_initializer != NULL);
      nir_var->data.read_only = true;
      break;
   case SpvDecorationNonReadable:
      nir_var->data.image.write_only = true;
      break;
   case SpvDecorationNonWritable:
      nir_var->data.read_only = true;
      nir_var->data.image.read_only = true;
      break;
   case SpvDecorationRestrict:
      nir_var->data.image.restrict_flag = true;
      break;
   case SpvDecorationVolatile:
      nir_var->data.image._volatile = true;
      break;
   case SpvDecorationCoherent:
      nir_var->data.image.coherent = true;
      break;
   case SpvDecorationComponent:
      nir_var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      nir_var->data.index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      if (builtin == SpvBuiltInWorkgroupSize) {
         /* This shouldn't be a builtin.  It's actually a constant. */
         nir_var->data.mode = nir_var_global;
         nir_var->data.read_only = true;

         nir_constant *c = rzalloc(nir_var, nir_constant);
         c->values[0].u32[0] = b->shader->info.cs.local_size[0];
         c->values[0].u32[1] = b->shader->info.cs.local_size[1];
         c->values[0].u32[2] = b->shader->info.cs.local_size[2];
         nir_var->constant_initializer = c;
         break;
      }

      nir_variable_mode mode = nir_var->data.mode;
      vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
      nir_var->data.mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         nir_var->data.compact = true;
         break;
      case SpvBuiltInSamplePosition:
         nir_var->data.origin_upper_left = b->origin_upper_left;
         break;
      case SpvBuiltInFragCoord:
         nir_var->data.pixel_center_integer = b->pixel_center_integer;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      nir_var->data.patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(val->pointer->chain == NULL);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else {
         vtn_warn("Location must be on input or output variable");
         return;
      }

      if (vtn_var->var) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->members);
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++) {
            vtn_var->members[i]->data.location = location;
            location +=
               glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
                                          is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         assert(member == -1);
         apply_var_decoration(b, vtn_var->var, dec);
      } else if (vtn_var->members) {
         if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, vtn_var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, vtn_var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}

static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         vtn_fail("Invalid uniform variable type");
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      if (glsl_type_is_image(interface_type->type)) {
         mode = vtn_variable_mode_image;
         nir_mode = nir_var_uniform;
      } else if (glsl_type_is_sampler(interface_type->type)) {
         mode = vtn_variable_mode_sampler;
         nir_mode = nir_var_uniform;
      } else {
         vtn_fail("Invalid uniform constant variable type");
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   case SpvStorageClassAtomicCounter:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* This pointer needs to have a pointer type with actual storage */
   vtn_assert(ptr->ptr_type);
   vtn_assert(ptr->ptr_type->type);

   if (!ptr->offset) {
      /* If we don't have an offset then we must be a pointer to the variable
       * itself.
       */
      vtn_assert(!ptr->offset && !ptr->block_index);

      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }

   vtn_assert(ptr->offset);
   if (ptr->block_index) {
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
   } else {
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
      return ptr->offset;
   }
}

*
1676 vtn_pointer_from_ssa(struct vtn_builder
*b
, nir_ssa_def
*ssa
,
1677 struct vtn_type
*ptr_type
)
1679 vtn_assert(ssa
->num_components
<= 2 && ssa
->bit_size
== 32);
1680 vtn_assert(ptr_type
->base_type
== vtn_base_type_pointer
);
1681 vtn_assert(ptr_type
->deref
->base_type
!= vtn_base_type_pointer
);
1682 /* This pointer type needs to have actual storage */
1683 vtn_assert(ptr_type
->type
);
1685 struct vtn_pointer
*ptr
= rzalloc(b
, struct vtn_pointer
);
1686 ptr
->mode
= vtn_storage_class_to_mode(b
, ptr_type
->storage_class
,
1688 ptr
->type
= ptr_type
->deref
;
1689 ptr
->ptr_type
= ptr_type
;
1691 if (ssa
->num_components
> 1) {
1692 vtn_assert(ssa
->num_components
== 2);
1693 vtn_assert(ptr
->mode
== vtn_variable_mode_ubo
||
1694 ptr
->mode
== vtn_variable_mode_ssbo
);
1695 ptr
->block_index
= nir_channel(&b
->nb
, ssa
, 0);
1696 ptr
->offset
= nir_channel(&b
->nb
, ssa
, 1);
1698 vtn_assert(ssa
->num_components
== 1);
1699 vtn_assert(ptr
->mode
== vtn_variable_mode_workgroup
);
1700 ptr
->block_index
= NULL
;
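/* Illustrative round trip (a sketch, assuming a UBO/SSBO pointer "ptr" that
 * already carries a block index):
 *
 *    nir_ssa_def *packed = vtn_pointer_to_ssa(b, ptr);
 *    // packed is vec2(block_index, byte_offset)
 *    struct vtn_pointer *back =
 *       vtn_pointer_from_ssa(b, packed, ptr->ptr_type);
 *    // back->block_index / back->offset are the two channels again
 *
 * Workgroup pointers instead pack to a single 32-bit byte offset.
 */
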
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}

1726 vtn_create_variable(struct vtn_builder
*b
, struct vtn_value
*val
,
1727 struct vtn_type
*ptr_type
, SpvStorageClass storage_class
,
1728 nir_constant
*initializer
)
1730 vtn_assert(ptr_type
->base_type
== vtn_base_type_pointer
);
1731 struct vtn_type
*type
= ptr_type
->deref
;
1733 struct vtn_type
*without_array
= type
;
1734 while(glsl_type_is_array(without_array
->type
))
1735 without_array
= without_array
->array_element
;
1737 enum vtn_variable_mode mode
;
1738 nir_variable_mode nir_mode
;
1739 mode
= vtn_storage_class_to_mode(b
, storage_class
, without_array
, &nir_mode
);
1742 case vtn_variable_mode_ubo
:
1743 b
->shader
->info
.num_ubos
++;
1745 case vtn_variable_mode_ssbo
:
1746 b
->shader
->info
.num_ssbos
++;
1748 case vtn_variable_mode_image
:
1749 b
->shader
->info
.num_images
++;
1751 case vtn_variable_mode_sampler
:
1752 b
->shader
->info
.num_textures
++;
1754 case vtn_variable_mode_push_constant
:
1755 b
->shader
->num_uniforms
= vtn_type_block_size(b
, type
);
1758 /* No tallying is needed */
1762 struct vtn_variable
*var
= rzalloc(b
, struct vtn_variable
);
1766 vtn_assert(val
->value_type
== vtn_value_type_pointer
);
1767 val
->pointer
= vtn_pointer_for_variable(b
, var
, ptr_type
);
1769 switch (var
->mode
) {
1770 case vtn_variable_mode_local
:
1771 case vtn_variable_mode_global
:
1772 case vtn_variable_mode_image
:
1773 case vtn_variable_mode_sampler
:
1774 /* For these, we create the variable normally */
1775 var
->var
= rzalloc(b
->shader
, nir_variable
);
1776 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1777 var
->var
->type
= var
->type
->type
;
1778 var
->var
->data
.mode
= nir_mode
;
1780 switch (var
->mode
) {
1781 case vtn_variable_mode_image
:
1782 case vtn_variable_mode_sampler
:
1783 var
->var
->interface_type
= without_array
->type
;
1786 var
->var
->interface_type
= NULL
;
1791 case vtn_variable_mode_workgroup
:
1792 if (b
->options
->lower_workgroup_access_to_offsets
) {
1793 var
->shared_location
= -1;
1795 /* Create the variable normally */
1796 var
->var
= rzalloc(b
->shader
, nir_variable
);
1797 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1798 var
->var
->type
= var
->type
->type
;
1799 var
->var
->data
.mode
= nir_var_shared
;
1803 case vtn_variable_mode_input
:
1804 case vtn_variable_mode_output
: {
1805 /* In order to know whether or not we're a per-vertex inout, we need
1806 * the patch qualifier. This means walking the variable decorations
1807 * early before we actually create any variables. Not a big deal.
1809 * GLSLang really likes to place decorations in the most interior
1810 * thing it possibly can. In particular, if you have a struct, it
1811 * will place the patch decorations on the struct members. This
1812 * should be handled by the variable splitting below just fine.
1814 * If you have an array-of-struct, things get even more weird as it
1815 * will place the patch decorations on the struct even though it's
1816 * inside an array and some of the members being patch and others not
1817 * makes no sense whatsoever. Since the only sensible thing is for
1818 * it to be all or nothing, we'll call it patch if any of the members
1819 * are declared patch.
1822 vtn_foreach_decoration(b
, val
, var_is_patch_cb
, &var
->patch
);
1823 if (glsl_type_is_array(var
->type
->type
) &&
1824 glsl_type_is_struct(without_array
->type
)) {
1825 vtn_foreach_decoration(b
, vtn_value(b
, without_array
->id
,
1826 vtn_value_type_type
),
1827 var_is_patch_cb
, &var
->patch
);
1830 /* For inputs and outputs, we immediately split structures. This
1831 * is for a couple of reasons. For one, builtins may all come in
1832 * a struct and we really want those split out into separate
1833 * variables. For another, interpolation qualifiers can be
1834 * applied to members of the top-level struct ane we need to be
1835 * able to preserve that information.
1838 int array_length
= -1;
1839 struct vtn_type
*interface_type
= var
->type
;
1840 if (is_per_vertex_inout(var
, b
->shader
->info
.stage
)) {
1841 /* In Geometry shaders (and some tessellation), inputs come
1842 * in per-vertex arrays. However, some builtins come in
1843 * non-per-vertex, hence the need for the is_array check. In
1844 * any case, there are no non-builtin arrays allowed so this
1845 * check should be sufficient.
1847 interface_type
= var
->type
->array_element
;
1848 array_length
= glsl_get_length(var
->type
->type
);
1851 if (glsl_type_is_struct(interface_type
->type
)) {
1852 /* It's a struct. Split it. */
1853 unsigned num_members
= glsl_get_length(interface_type
->type
);
1854 var
->members
= ralloc_array(b
, nir_variable
*, num_members
);
1856 for (unsigned i
= 0; i
< num_members
; i
++) {
1857 const struct glsl_type
*mtype
= interface_type
->members
[i
]->type
;
1858 if (array_length
>= 0)
1859 mtype
= glsl_array_type(mtype
, array_length
);
1861 var
->members
[i
] = rzalloc(b
->shader
, nir_variable
);
1862 var
->members
[i
]->name
=
1863 ralloc_asprintf(var
->members
[i
], "%s.%d", val
->name
, i
);
1864 var
->members
[i
]->type
= mtype
;
1865 var
->members
[i
]->interface_type
=
1866 interface_type
->members
[i
]->type
;
1867 var
->members
[i
]->data
.mode
= nir_mode
;
1868 var
->members
[i
]->data
.patch
= var
->patch
;
1871 assert(i
< initializer
->num_elements
);
1872 var
->members
[i
]->constant_initializer
=
1873 nir_constant_clone(initializer
->elements
[i
], var
->members
[i
]);
1879 var
->var
= rzalloc(b
->shader
, nir_variable
);
1880 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1881 var
->var
->type
= var
->type
->type
;
1882 var
->var
->interface_type
= interface_type
->type
;
1883 var
->var
->data
.mode
= nir_mode
;
1884 var
->var
->data
.patch
= var
->patch
;
1887 /* For inputs and outputs, we need to grab locations and builtin
1888 * information from the interface type.
1890 vtn_foreach_decoration(b
, vtn_value(b
, interface_type
->id
,
1891 vtn_value_type_type
),
1892 var_decoration_cb
, var
);
1896 case vtn_variable_mode_param
:
1897 vtn_fail("Not created through OpVariable");
1899 case vtn_variable_mode_ubo
:
1900 case vtn_variable_mode_ssbo
:
1901 case vtn_variable_mode_push_constant
:
1902 /* These don't need actual variables. */
1907 var
->var
->constant_initializer
=
1908 nir_constant_clone(initializer
, var
->var
);
1911 vtn_foreach_decoration(b
, val
, var_decoration_cb
, var
);

   if (var->mode == vtn_variable_mode_image ||
       var->mode == vtn_variable_mode_sampler) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;

      if (var->mode == vtn_variable_mode_image)
         var->var->data.image.format = without_array->image_format;
   }
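
   /* Example mapping (illustrative only): a Vulkan GLSL declaration such as
    *
    *    layout(set = 1, binding = 3) uniform sampler2D tex;
    *
    * carries DescriptorSet 1 and Binding 3 decorations in SPIR-V, which
    * land in data.descriptor_set and data.binding above.
    */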
   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->members == NULL && var->var != NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else if (var->members) {
      unsigned count = glsl_get_length(without_array->type);
      for (unsigned i = 0; i < count; i++) {
         vtn_assert(var->members[i]->data.mode != nir_var_local);
         nir_shader_add_variable(b->shader, var->members[i]);
      }
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
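
/* Concrete instance of the workaround above (hypothetical IDs): a buggy
 * module might declare two structurally identical struct types,
 *
 *    %10 = OpTypeStruct %float %float
 *    %20 = OpTypeStruct %float %float
 *
 * and then OpCopyMemory between pointers typed with %10 and %20.  The IDs
 * differ but the types are compatible, so we warn instead of failing.
 */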

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }
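
   /* Note on operand layout (per the SPIR-V binary format): for the opcodes
    * handled below, w[1] is the result-type ID and w[2] is the result ID;
    * the remaining words are opcode-specific operands, e.g. w[3] is the
    * storage class for OpVariable and the base pointer for OpAccessChain.
    */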

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];

      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }
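
      /* For example (hypothetical module): an access of "ssbo.arr[i].x"
       * typically shows up as OpAccessChain with three index operands: a
       * constant member index (stored as vtn_access_mode_literal), a
       * dynamic SSA index %i (stored as vtn_access_mode_id), and another
       * constant member index.
       */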
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (src->mode == vtn_variable_mode_image ||
          src->mode == vtn_variable_mode_sampler) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }
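
      /* Sketch of the pattern being worked around (hypothetical GLSL): the
       * shader reads a uint member such as "flags" from an SSBO and stores
       * it straight into "bool b_local;".  The nir_i2b above turns the
       * integer into a proper boolean (nonzero -> true) before the store.
       */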

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to work around the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
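      /* Worked example (made-up numbers): with buf_size = 256, a runtime
       * array starting at offset = 16, and an element stride of 12, this
       * computes max(256 - 16, 0) / 12 = 240 / 12 = 20 elements.
       */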
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb, buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}