/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}

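/* Illustrative sizing note (an assumed example, not from the original
 * source): struct vtn_access_chain already embeds a one-element link[]
 * tail, so a chain with length == 3 is allocated as
 *
 *    sizeof(*chain) + (3 - 1) * sizeof(chain->link[0])
 *
 * i.e. two extra links beyond the built-in one; MAX2() keeps a
 * zero-length chain from underflowing.
 */
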
static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   struct vtn_access_chain *chain;

   unsigned old_len = old ? old->length : 0;
   chain = vtn_access_chain_create(b, old_len + new_ids);

   for (unsigned i = 0; i < old_len; i++)
      chain->link[i] = old->link[i];

   return chain;
}

static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

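/* Summary (inferred from the two predicates above): pointers for which
 * vtn_pointer_uses_ssa_offset() returns true are lowered to explicit
 * (block_index, byte offset) arithmetic, e.g. a UBO member access; all
 * other pointers keep a vtn_access_chain and are eventually turned into
 * NIR variable deref chains.
 */
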
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_access_chain_pointer_dereference(struct vtn_builder *b,
                                     struct vtn_pointer *base,
                                     struct vtn_access_chain *deref_chain)
{
   struct vtn_access_chain *chain =
      vtn_access_chain_extend(b, base->chain, deref_chain->length);
   struct vtn_type *type = base->type;

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   unsigned start = base->chain ? base->chain->length : 0;
   for (unsigned i = 0; i < deref_chain->length; i++) {
      chain->link[start + i] = deref_chain->link[i];

      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         type = type->members[deref_chain->link[i].id];
      } else {
         type = type->array_element;
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->chain = chain;

   return ptr;
}

static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_u2u32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_u2u32(&b->nb, src0);
      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
   }
}

static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
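         /* Illustrative SPIR-V (an assumed example, not from the original
          * source):
          *
          *    %elem = OpPtrAccessChain %ptr_ssbo_block %base %idx
          *
          * where %base points at a block-decorated struct: the %idx link is
          * folded into the descriptor array index via vtn_resource_reindex()
          * below rather than into the byte offset.
          */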
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
         offset = nir_iadd(&b->nb, offset, mem_offset);
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;

   return ptr;
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_access_chain_pointer_dereference(b, base, deref_chain);
   }
}

/* Crawls a chain of array derefs and rewrites the types so that the
 * lengths stay the same but the terminal type is the one given by
 * tail_type.  This is useful for split structures.
 */
static void
rewrite_deref_types(struct vtn_builder *b, nir_deref *deref,
                    const struct glsl_type *type)
{
   deref->type = type;
   if (deref->child) {
      vtn_assert(deref->child->deref_type == nir_deref_type_array);
      vtn_assert(glsl_type_is_array(deref->type));
      rewrite_deref_types(b, deref->child, glsl_get_array_element(type));
   }
}

struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;

   return pointer;
}

nir_deref_var *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   nir_deref_var *deref_var;
   if (ptr->var->var) {
      deref_var = nir_deref_var_create(b, ptr->var->var);
      /* Raw variable access */
      if (!ptr->chain)
         return deref_var;
   } else {
      vtn_assert(ptr->var->members);
      /* Create the deref_var manually.  It will get filled out later. */
      deref_var = rzalloc(b, nir_deref_var);
      deref_var->deref.deref_type = nir_deref_type_var;
   }

   struct vtn_access_chain *chain = ptr->chain;
   vtn_assert(chain);

   struct vtn_type *deref_type = ptr->var->type;
   nir_deref *tail = &deref_var->deref;
   nir_variable **members = ptr->var->members;

   for (unsigned i = 0; i < chain->length; i++) {
      enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         deref_type = deref_type->array_element;

         nir_deref_array *deref_arr = nir_deref_array_create(b);
         deref_arr->deref.type = deref_type->type;

         if (chain->link[i].mode == vtn_access_mode_literal) {
            deref_arr->deref_array_type = nir_deref_array_type_direct;
            deref_arr->base_offset = chain->link[i].id;
         } else {
            vtn_assert(chain->link[i].mode == vtn_access_mode_id);
            deref_arr->deref_array_type = nir_deref_array_type_indirect;
            deref_arr->base_offset = 0;
            deref_arr->indirect =
               nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
         }
         tail->child = &deref_arr->deref;
         tail = tail->child;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = chain->link[i].id;
         deref_type = deref_type->members[idx];
         if (members) {
            /* This is a pre-split structure. */
            deref_var->var = members[idx];
            rewrite_deref_types(b, &deref_var->deref, members[idx]->type);
            vtn_assert(tail->type == deref_type->type);
            members = NULL;
         } else {
            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
            deref_struct->deref.type = deref_type->type;
            tail->child = &deref_struct->deref;
            tail = tail->child;
         }
         break;
      }
      default:
         vtn_fail("Invalid type for deref");
      }
   }

   vtn_assert(members == NULL);
   return deref_var;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
                      nir_deref *tail, struct vtn_ssa_value *inout)
{
   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = tail->child;

   if (glsl_type_is_vector_or_scalar(tail->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      tail->child = NULL;

      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
                                   nir_intrinsic_store_var;

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
      intrin->variables[0] = nir_deref_var_clone(deref, intrin);
      intrin->num_components = glsl_get_vector_elements(tail->type);

      if (load) {
         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                           intrin->num_components,
                           glsl_get_bit_size(tail->type),
                           NULL);
         inout->def = &intrin->dest.ssa;
      } else {
         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
         intrin->src[0] = nir_src_for_ssa(inout->def);
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(tail->type)) {
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_array *deref_arr = nir_deref_array_create(b);
      deref_arr->deref_array_type = nir_deref_array_type_direct;
      deref_arr->deref.type = glsl_get_array_element(tail->type);
      tail->child = &deref_arr->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_arr->base_offset = i;
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
      tail->child = &deref_struct->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_struct->index = i;
         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   }

   tail->child = old_child;
}

nir_deref_var *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref *
get_deref_tail(nir_deref_var *deref)
{
   nir_deref *cur = &deref->deref;
   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
      cur = cur->child;

   return cur;
}

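/* Illustrative case (an assumed example, not from the original source):
 * for GLSL "float x = v.y;" glslang may emit
 *
 *    %p = OpAccessChain %_ptr_Function_float %v %uint_1
 *
 * leaving an array deref that selects component 1 below the vector-typed
 * tail returned here; vtn_local_load()/vtn_local_store() below peel that
 * component selection off around the full vector load/store.
 */
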
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
{
   nir_deref *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src, src_tail, val);

   if (src_tail->child) {
      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
      vtn_assert(vec_deref->deref.child == NULL);
      val->type = vec_deref->deref.type;
      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def,
                                               vec_deref->indirect.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_var *dest)
{
   nir_deref *dest_tail = get_deref_tail(dest);

   if (dest_tail->child) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest, dest_tail, val);
      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
      vtn_assert(deref->deref.child == NULL);
      if (deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      deref->base_offset);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              deref->indirect.ssa);
      _vtn_local_load_store(b, false, dest, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest, dest_tail, src);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out, unsigned *end_idx_out)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      if (!ptr->offset) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }
      *index_out = ptr->block_index;
      return ptr->offset;
   }

   vtn_assert(ptr->mode == vtn_variable_mode_push_constant);
   *index_out = NULL;

   unsigned idx = 0;
   struct vtn_type *type = ptr->var->type;
   nir_ssa_def *offset = nir_imm_int(&b->nb, 0);

   if (ptr->chain) {
      for (; idx < ptr->chain->length; idx++) {
         enum glsl_base_type base_type = glsl_get_base_type(type->type);
         switch (base_type) {
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_ARRAY:
            offset = nir_iadd(&b->nb, offset,
                              vtn_access_link_as_ssa(b, ptr->chain->link[idx],
                                                     type->stride));
            type = type->array_element;
            break;

         case GLSL_TYPE_STRUCT: {
            vtn_assert(ptr->chain->link[idx].mode == vtn_access_mode_literal);
            unsigned member = ptr->chain->link[idx].id;
            offset = nir_iadd(&b->nb, offset,
                              nir_imm_int(&b->nb, type->offsets[member]));
            type = type->members[member];
            break;
         }

         default:
            vtn_fail("Invalid type for deref");
         }
      }
   }

   vtn_assert(type == ptr->type);
   if (end_idx_out)
      *end_idx_out = idx;

   return offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}

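/* Worked example (assumed std430-style offsets, not from the original
 * source): for a block member "struct { vec3 a; float b; }" with offsets
 * {0, 12}, the struct case above computes
 *
 *    size = MAX2(0 + 12, 12 + 4) = 16 bytes
 *
 * and an array of four such structs with stride 16 returns 4 * 16 = 64.
 */
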
static void
vtn_access_chain_get_offset_size(struct vtn_builder *b,
                                 struct vtn_access_chain *chain,
                                 struct vtn_type *type,
                                 unsigned *access_offset,
                                 unsigned *access_size)
{
   *access_offset = 0;

   for (unsigned i = 0; i < chain->length; i++) {
      if (chain->link[i].mode != vtn_access_mode_literal)
         break;

      if (glsl_type_is_struct(type->type)) {
         *access_offset += type->offsets[chain->link[i].id];
         type = type->members[chain->link[i].id];
      } else {
         *access_offset += type->stride * chain->link[i].id;
         type = type->array_element;
      }
   }

   *access_size = vtn_type_block_size(b, type);
}

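/* Illustrative walk (an assumed example, not from the original source):
 * a literal chain selecting member 1 at offset 16 and then array index 2
 * with stride 4 yields *access_offset == 16 + 2 * 4 == 24.  The first
 * non-literal link stops the walk, so *access_size conservatively covers
 * the whole aggregate reached at that point.
 */
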
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(type), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

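/* Illustrative note (an assumed example, not from the original source):
 * for a push-constant load with access_offset == 64, the intrinsic above
 * gets base = 64 and its offset source is rebased with nir_isub(), so an
 * incoming byte offset of 72 reads 8 bytes into the [base, base + range)
 * window declared by access_size.
 */
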
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_access_chain *chain, unsigned chain_idx,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (chain && chain_idx >= chain->length)
      chain = NULL;

   if (load && chain == NULL && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width));
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type));
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);

               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}

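/* Illustrative note on the matrix path above (assumed, not from the
 * original source): a row-major matrix is loaded one vector per *row*
 * (num_ops rows of vec_width components, col_stride bytes apart) into a
 * transposed temporary, and vtn_ssa_transpose() then restores the
 * column-major form consumers expect; stores run the same path in
 * reverse.
 */
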
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      vtn_access_chain_get_offset_size(b, src->chain, src->var->type,
                                       &access_offset, &access_size);
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, src, &index, &chain_idx);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->chain, chain_idx, src->type, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, dst, &index, &chain_idx);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->chain, chain_idx, dst->type, &src);
}

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexIndex:
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      *location = SYSTEM_VALUE_BASE_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin");
   }
}

static void
apply_var_decoration(struct vtn_builder *b, nir_variable *nir_var,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      nir_var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      nir_var->data.interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      nir_var->data.centroid = true;
      break;
   case SpvDecorationSample:
      nir_var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      nir_var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      vtn_assert(nir_var->constant_initializer != NULL);
      nir_var->data.read_only = true;
      break;
   case SpvDecorationNonReadable:
      nir_var->data.image.write_only = true;
      break;
   case SpvDecorationNonWritable:
      nir_var->data.read_only = true;
      nir_var->data.image.read_only = true;
      break;
   case SpvDecorationRestrict:
      nir_var->data.image.restrict_flag = true;
      break;
   case SpvDecorationVolatile:
      nir_var->data.image._volatile = true;
      break;
   case SpvDecorationCoherent:
      nir_var->data.image.coherent = true;
      break;
   case SpvDecorationComponent:
      nir_var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      nir_var->data.index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = nir_var->data.mode;
      vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
      nir_var->data.mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         nir_var->data.compact = true;
         break;
      case SpvBuiltInSamplePosition:
         nir_var->data.origin_upper_left = b->origin_upper_left;
         /* fallthrough */
      case SpvBuiltInFragCoord:
         nir_var->data.pixel_center_integer = b->pixel_center_integer;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      nir_var->data.patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(val->pointer->chain == NULL);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else {
         vtn_warn("Location must be on input or output variable");
         return;
      }

      if (vtn_var->var) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->members);
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++) {
            vtn_var->members[i]->data.location = location;
            location +=
               glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
                                          is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         assert(member == -1);
         apply_var_decoration(b, vtn_var->var, dec);
      } else if (vtn_var->members) {
         if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, vtn_var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, vtn_var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}

static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         vtn_fail("Invalid uniform variable type");
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      if (glsl_type_is_image(interface_type->type)) {
         mode = vtn_variable_mode_image;
         nir_mode = nir_var_uniform;
      } else if (glsl_type_is_sampler(interface_type->type)) {
         mode = vtn_variable_mode_sampler;
         nir_mode = nir_var_uniform;
      } else {
         vtn_fail("Invalid uniform constant variable type");
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   case SpvStorageClassAtomicCounter:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* This pointer needs to have a pointer type with actual storage */
   vtn_assert(ptr->ptr_type);
   vtn_assert(ptr->ptr_type->type);

   if (!ptr->offset) {
      /* If we don't have an offset then we must be a pointer to the variable
       * itself.
       */
      vtn_assert(!ptr->offset && !ptr->block_index);

      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }

   vtn_assert(ptr->offset);
   if (ptr->block_index) {
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
   } else {
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
      return ptr->offset;
   }
}

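/* Illustrative round trip (assumed usage, not from the original source):
 *
 *    nir_ssa_def *packed = vtn_pointer_to_ssa(b, ptr);
 *    struct vtn_pointer *again = vtn_pointer_from_ssa(b, packed,
 *                                                     ptr->ptr_type);
 *
 * For UBO/SSBO pointers "packed" is a 32-bit uvec2 of (block_index,
 * offset); for workgroup variables it is the scalar byte offset alone,
 * as vtn_pointer_from_ssa() below assumes.
 */
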
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->base_type != vtn_base_type_pointer);
   /* This pointer type needs to have actual storage */
   vtn_assert(ptr_type->type);

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         ptr_type, NULL);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (ssa->num_components > 1) {
      vtn_assert(ssa->num_components == 2);
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      ptr->block_index = nir_channel(&b->nb, ssa, 0);
      ptr->offset = nir_channel(&b->nb, ssa, 1);
   } else {
      vtn_assert(ssa->num_components == 1);
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
      ptr->block_index = NULL;
      ptr->offset = ssa;
   }

   return ptr;
}

static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}

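/* Illustrative case (an assumed example, not from the original source):
 * a geometry-shader input like "in Block { vec4 color; } gs_in[];" is
 * per-vertex, so vtn_create_variable() below strips the outer array
 * level before splitting the struct, while a patch-decorated
 * tessellation output is not treated as per-vertex.
 */
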
1747 vtn_create_variable(struct vtn_builder
*b
, struct vtn_value
*val
,
1748 struct vtn_type
*ptr_type
, SpvStorageClass storage_class
,
1749 nir_constant
*initializer
)
1751 vtn_assert(ptr_type
->base_type
== vtn_base_type_pointer
);
1752 struct vtn_type
*type
= ptr_type
->deref
;
1754 struct vtn_type
*without_array
= type
;
1755 while(glsl_type_is_array(without_array
->type
))
1756 without_array
= without_array
->array_element
;
1758 enum vtn_variable_mode mode
;
1759 nir_variable_mode nir_mode
;
1760 mode
= vtn_storage_class_to_mode(b
, storage_class
, without_array
, &nir_mode
);
1763 case vtn_variable_mode_ubo
:
1764 b
->shader
->info
.num_ubos
++;
1766 case vtn_variable_mode_ssbo
:
1767 b
->shader
->info
.num_ssbos
++;
1769 case vtn_variable_mode_image
:
1770 b
->shader
->info
.num_images
++;
1772 case vtn_variable_mode_sampler
:
1773 b
->shader
->info
.num_textures
++;
1775 case vtn_variable_mode_push_constant
:
1776 b
->shader
->num_uniforms
= vtn_type_block_size(b
, type
);
1779 /* No tallying is needed */
1783 struct vtn_variable
*var
= rzalloc(b
, struct vtn_variable
);
1787 vtn_assert(val
->value_type
== vtn_value_type_pointer
);
1788 val
->pointer
= vtn_pointer_for_variable(b
, var
, ptr_type
);
1790 switch (var
->mode
) {
1791 case vtn_variable_mode_local
:
1792 case vtn_variable_mode_global
:
1793 case vtn_variable_mode_image
:
1794 case vtn_variable_mode_sampler
:
1795 /* For these, we create the variable normally */
1796 var
->var
= rzalloc(b
->shader
, nir_variable
);
1797 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1798 var
->var
->type
= var
->type
->type
;
1799 var
->var
->data
.mode
= nir_mode
;
1801 switch (var
->mode
) {
1802 case vtn_variable_mode_image
:
1803 case vtn_variable_mode_sampler
:
1804 var
->var
->interface_type
= without_array
->type
;
1807 var
->var
->interface_type
= NULL
;
1812 case vtn_variable_mode_workgroup
:
1813 if (b
->options
->lower_workgroup_access_to_offsets
) {
1814 var
->shared_location
= -1;
1816 /* Create the variable normally */
1817 var
->var
= rzalloc(b
->shader
, nir_variable
);
1818 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1819 var
->var
->type
= var
->type
->type
;
1820 var
->var
->data
.mode
= nir_var_shared
;
1824 case vtn_variable_mode_input
:
1825 case vtn_variable_mode_output
: {
1826 /* In order to know whether or not we're a per-vertex inout, we need
1827 * the patch qualifier. This means walking the variable decorations
1828 * early before we actually create any variables. Not a big deal.
1830 * GLSLang really likes to place decorations in the most interior
1831 * thing it possibly can. In particular, if you have a struct, it
1832 * will place the patch decorations on the struct members. This
1833 * should be handled by the variable splitting below just fine.
1835 * If you have an array-of-struct, things get even more weird as it
1836 * will place the patch decorations on the struct even though it's
1837 * inside an array and some of the members being patch and others not
1838 * makes no sense whatsoever. Since the only sensible thing is for
1839 * it to be all or nothing, we'll call it patch if any of the members
1840 * are declared patch.
1843 vtn_foreach_decoration(b
, val
, var_is_patch_cb
, &var
->patch
);
1844 if (glsl_type_is_array(var
->type
->type
) &&
1845 glsl_type_is_struct(without_array
->type
)) {
1846 vtn_foreach_decoration(b
, vtn_value(b
, without_array
->id
,
1847 vtn_value_type_type
),
1848 var_is_patch_cb
, &var
->patch
);
1851 /* For inputs and outputs, we immediately split structures. This
1852 * is for a couple of reasons. For one, builtins may all come in
1853 * a struct and we really want those split out into separate
1854 * variables. For another, interpolation qualifiers can be
1855 * applied to members of the top-level struct ane we need to be
1856 * able to preserve that information.
1859 int array_length
= -1;
1860 struct vtn_type
*interface_type
= var
->type
;
1861 if (is_per_vertex_inout(var
, b
->shader
->info
.stage
)) {
1862 /* In Geometry shaders (and some tessellation), inputs come
1863 * in per-vertex arrays. However, some builtins come in
1864 * non-per-vertex, hence the need for the is_array check. In
1865 * any case, there are no non-builtin arrays allowed so this
1866 * check should be sufficient.
1868 interface_type
= var
->type
->array_element
;
1869 array_length
= glsl_get_length(var
->type
->type
);
1872 if (glsl_type_is_struct(interface_type
->type
)) {
1873 /* It's a struct. Split it. */
1874 unsigned num_members
= glsl_get_length(interface_type
->type
);
1875 var
->members
= ralloc_array(b
, nir_variable
*, num_members
);
1877 for (unsigned i
= 0; i
< num_members
; i
++) {
1878 const struct glsl_type
*mtype
= interface_type
->members
[i
]->type
;
1879 if (array_length
>= 0)
1880 mtype
= glsl_array_type(mtype
, array_length
);
1882 var
->members
[i
] = rzalloc(b
->shader
, nir_variable
);
1883 var
->members
[i
]->name
=
1884 ralloc_asprintf(var
->members
[i
], "%s.%d", val
->name
, i
);
1885 var
->members
[i
]->type
= mtype
;
1886 var
->members
[i
]->interface_type
=
1887 interface_type
->members
[i
]->type
;
1888 var
->members
[i
]->data
.mode
= nir_mode
;
1889 var
->members
[i
]->data
.patch
= var
->patch
;
1892 assert(i
< initializer
->num_elements
);
1893 var
->members
[i
]->constant_initializer
=
1894 nir_constant_clone(initializer
->elements
[i
], var
->members
[i
]);
1900 var
->var
= rzalloc(b
->shader
, nir_variable
);
1901 var
->var
->name
= ralloc_strdup(var
->var
, val
->name
);
1902 var
->var
->type
= var
->type
->type
;
1903 var
->var
->interface_type
= interface_type
->type
;
1904 var
->var
->data
.mode
= nir_mode
;
1905 var
->var
->data
.patch
= var
->patch
;
1908 /* For inputs and outputs, we need to grab locations and builtin
1909 * information from the interface type.
1911 vtn_foreach_decoration(b
, vtn_value(b
, interface_type
->id
,
1912 vtn_value_type_type
),
1913 var_decoration_cb
, var
);
1917 case vtn_variable_mode_param
:
1918 vtn_fail("Not created through OpVariable");
1920 case vtn_variable_mode_ubo
:
1921 case vtn_variable_mode_ssbo
:
1922 case vtn_variable_mode_push_constant
:
1923 /* These don't need actual variables. */
1928 var
->var
->constant_initializer
=
1929 nir_constant_clone(initializer
, var
->var
);

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_image ||
       var->mode == vtn_variable_mode_sampler) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;

      if (var->mode == vtn_variable_mode_image)
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->members == NULL && var->var != NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else if (var->members) {
      unsigned count = glsl_get_length(without_array->type);
      for (unsigned i = 0; i < count; i++) {
         vtn_assert(var->members[i]->data.mode != nir_var_local);
         nir_shader_add_variable(b->shader, var->members[i]);
      }
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
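
/* Hypothetical SPIR-V showing the compatible-but-distinct case above (not
 * from any real module):
 *
 *    %10 = OpTypeFloat 32
 *    %11 = OpTypeFloat 32    ; re-emitted, structurally identical
 *
 * A store through a %10-typed pointer of a %11-typed value has mismatched
 * type IDs but compatible types, so we only warn instead of failing.
 */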

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }
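
   /* For reference: the OpVariable words consumed above are w[1] = result
    * type (a pointer type), w[2] = result id, w[3] = storage class, and
    * the optional w[4] = initializer, hence the count > 4 check.
    */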

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }
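
      /* A hypothetical illustration (not from this file): for
       *
       *    %p = OpAccessChain %ptr %var %c2 %i
       *
       * where %c2 is an OpConstant with value 2 and %i is a dynamic index,
       * the loop above produces link[0] = {literal, 2} and
       * link[1] = {id, %i}.
       */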

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (src->mode == vtn_variable_mode_image ||
          src->mode == vtn_variable_mode_sampler) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }
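
      /* Hypothetical GLSL that triggers the workaround above (illustrative
       * only):
       *
       *    uniform U { bool flag; };   // old glslang made this a uint
       *    bool b = flag;
       *
       * The store of the loaded uint into the local bool reaches us as a
       * plain OpStore; nir_i2b converts the value to a proper boolean
       * (nonzero -> true) before the store.
       */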

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to work around the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb, buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));
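
      /* Worked example with hypothetical numbers: for buf_size = 256 bytes,
       * offset = 16, and stride = 12, the runtime array length is
       * max(256 - 16, 0) / 12 = 240 / 12 = 20 elements.
       */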

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}