/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
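
/* A worked example of the sizing above: for length == 3,
 * size = sizeof(*chain) + (MAX2(3, 1) - 1) * sizeof(chain->link[0]), i.e.
 * the base struct (which already holds link[0]) plus room for link[1] and
 * link[2].  The MAX2() keeps a hypothetical zero-length chain from
 * underflowing the unsigned subtraction.
 */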
static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   struct vtn_access_chain *chain;

   unsigned old_len = old ? old->length : 0;
   chain = vtn_access_chain_create(b, old_len + new_ids);

   for (unsigned i = 0; i < old_len; i++)
      chain->link[i] = old->link[i];

   return chain;
}
static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_access_chain_pointer_dereference(struct vtn_builder *b,
                                     struct vtn_pointer *base,
                                     struct vtn_access_chain *deref_chain)
{
   struct vtn_access_chain *chain =
      vtn_access_chain_extend(b, base->chain, deref_chain->length);
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   unsigned start = base->chain ? base->chain->length : 0;
   for (unsigned i = 0; i < deref_chain->length; i++) {
      chain->link[start + i] = deref_chain->link[i];

      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         type = type->members[deref_chain->link[i].id];
      } else {
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = base->deref;
   ptr->chain = chain;
   ptr->access = access;

   return ptr;
}
static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_u2u32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_u2u32(&b->nb, src0);
      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
   }
}
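
/* To illustrate the three cases above with an assumed stride of 16 bytes:
 * a literal link with id 2 folds to nir_imm_int(&b->nb, 32) at compile
 * time; an SSA link with stride 1 is returned as-is (converted to 32 bits
 * if needed); and an SSA link with stride 16 becomes
 * nir_imul(ssa, nir_imm_int(&b->nb, 16)).
 */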
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
         offset = nir_iadd(&b->nb, offset, mem_offset);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
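
/* Sketch of how the loop above accumulates an offset, for a hypothetical
 * SSBO member access "s.b[i]" where struct s { vec4 a; float b[8]; } has
 * offsets[1] == 16 and b has stride 4: the struct link adds the literal
 * nir_imm_int(16), the array link adds i * 4 via vtn_access_link_as_ssa(),
 * and the resulting pointer carries offset == 16 + 4 * i from the start of
 * the buffer.
 */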
/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_access_chain_pointer_dereference(b, base, deref_chain);
   }
}
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}
/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of an uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
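
/* For example, an AtomicCounter uniform declared as uint[4][2] comes back
 * from repair_atomic_type() as atomic_uint[4][2]: each array level is
 * rebuilt and only the uint leaf is swapped for atomic_uint.
 */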
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var && ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   nir_deref_instr *tail;
   if (ptr->deref) {
      tail = ptr->deref;
   } else {
      assert(ptr->var && ptr->var->var);
      tail = nir_build_deref_var(&b->nb, ptr->var->var);
   }

   /* Raw variable access */
   if (!ptr->chain)
      return tail;

   struct vtn_access_chain *chain = ptr->chain;
   vtn_assert(chain);

   for (unsigned i = 0; i < chain->length; i++) {
      if (glsl_type_is_struct(tail->type)) {
         vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = chain->link[i].id;
         tail = nir_build_deref_struct(&b->nb, tail, idx);
      } else {
         nir_ssa_def *index;
         if (chain->link[i].mode == vtn_access_mode_literal) {
            index = nir_imm_int(&b->nb, chain->link[i].id);
         } else {
            vtn_assert(chain->link[i].mode == vtn_access_mode_id);
            index = vtn_ssa_value(b, chain->link[i].id)->def;
         }
         tail = nir_build_deref_array(&b->nb, tail, index);
      }
   }

   return tail;
}
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref(&b->nb, deref);
      } else {
         nir_store_deref(&b->nb, deref, inout->def, ~0);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_type_is_struct(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   }
}
nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val);

   if (src_tail != src) {
      val->type = src->type;
      nir_const_value *const_index = nir_src_as_const_value(src->arr.index);
      if (const_index)
         val->def = vtn_vector_extract(b, val->def, const_index->u32[0]);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val);

      nir_const_value *const_index = nir_src_as_const_value(dest->arr.index);
      if (const_index)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      const_index->u32[0]);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src);
   }
}
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}
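
/* A worked example, assuming a hypothetical block struct { vec4 a;
 * float b[3]; } with offsets[0] == 0, offsets[1] == 16 and a stride of 4
 * for b: the struct case takes the maximum of the field ends 0 + 16 and
 * 16 + 3 * 4, so vtn_type_block_size() reports 28 bytes.  The sizes come
 * entirely from the explicit offsets and strides in the SPIR-V, not from
 * any packing rule applied here.
 */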
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(type), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}
static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);

               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
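
/* Example of the matrix path above: for a row-major matrix with four
 * columns and three rows in a UBO, num_ops == 3 and vec_width == 4, so the
 * matrix moves as three vec4 row accesses at offset, offset + col_stride
 * and offset + 2 * col_stride, with a transpose on load (or before store)
 * to recover the column-major SSA form.
 */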
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}
static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}
void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}
static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}
static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}
static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin: %u", builtin);
   }
}
static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         var_data->compact = true;
         break;
      case SpvBuiltInFragCoord:
         var_data->pixel_center_integer = b->pixel_center_integer;
         /* fallthrough */
      case SpvBuiltInSamplePosition:
         var_data->origin_upper_left = b->origin_upper_left;
         break;
      default:
         break;
      }
      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->literals[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->literals[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->literals[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->literals[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->literals[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(val->pointer->chain == NULL);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input = false;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);
         for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
            vtn_var->var->members[i].location = location;
            const struct glsl_type *member_type =
               glsl_get_struct_field(vtn_var->var->interface_type, i);
            location += glsl_count_attribute_slots(member_type,
                                                   is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            assert(member == -1);
            apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
   }
}
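
/* Concretely, the SSA form of a pointer is mode-dependent: UBO/SSBO
 * pointers become vec2(block_index, offset) while workgroup pointers are
 * just the scalar byte offset.  vtn_pointer_from_ssa() below performs the
 * matching unpacking.
 */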
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_type *interface_type = ptr_type->deref;
   while (interface_type->base_type == vtn_base_type_array)
      interface_type = interface_type->array_element;

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         interface_type, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (ptr->mode == vtn_variable_mode_ubo ||
       ptr->mode == vtn_variable_mode_ssbo) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      vtn_assert(ssa->num_components == 2);
      ptr->block_index = nir_channel(&b->nb, ssa, 0);
      ptr->offset = nir_channel(&b->nb, ssa, 1);
   } else if (ptr->mode == vtn_variable_mode_workgroup ||
              ptr->mode == vtn_variable_mode_push_constant) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      vtn_assert(ssa->num_components == 1);
      ptr->block_index = NULL;
      ptr->offset = ssa;
   } else {
      ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                        ptr_type->deref->type);
   }

   return ptr;
}
static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;
   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_local:
   case vtn_variable_mode_global:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      /* Need to tweak the nir type here as at vtn_handle_type we don't have
       * access to storage_class, which is what tells us that this is
       * actually an atomic uint.
       */
      if (storage_class == SpvStorageClassAtomicCounter) {
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *interface_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         interface_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = var->type->type;
      var->var->interface_type = interface_type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      if (glsl_type_is_struct(interface_type->type)) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(interface_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the interface type.
       */
      vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
      /* These don't need actual variables. */
      break;
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}
static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         /* We can't load images/samplers from a pointer */
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to workaround the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }
: {
2034 struct vtn_pointer
*ptr
=
2035 vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2037 const uint32_t offset
= ptr
->var
->type
->offsets
[w
[4]];
2038 const uint32_t stride
= ptr
->var
->type
->members
[w
[4]]->stride
;
2040 if (!ptr
->block_index
) {
2041 struct vtn_access_chain chain
= {
2044 ptr
= vtn_ssa_offset_pointer_dereference(b
, ptr
, &chain
);
2045 vtn_assert(ptr
->block_index
);
2048 nir_intrinsic_instr
*instr
=
2049 nir_intrinsic_instr_create(b
->nb
.shader
,
2050 nir_intrinsic_get_buffer_size
);
2051 instr
->src
[0] = nir_src_for_ssa(ptr
->block_index
);
2052 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 1, 32, NULL
);
2053 nir_builder_instr_insert(&b
->nb
, &instr
->instr
);
2054 nir_ssa_def
*buf_size
= &instr
->dest
.ssa
;
2056 /* array_length = max(buffer_size - offset, 0) / stride */
2057 nir_ssa_def
*array_length
=
2062 nir_imm_int(&b
->nb
, offset
)),
2063 nir_imm_int(&b
->nb
, 0u)),
2064 nir_imm_int(&b
->nb
, stride
));
2066 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
2067 val
->ssa
= vtn_create_ssa_value(b
, glsl_uint_type());
2068 val
->ssa
->def
= array_length
;
2072 case SpvOpCopyMemorySized
:
2074 vtn_fail("Unhandled opcode");