/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir_deref.h"

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}
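
/* Sizing sketch for vtn_access_chain_create() above (illustrative): the
 * link[] array has one element built into the struct, so a chain of
 * length 3 allocates
 *
 *    sizeof(struct vtn_access_chain) + (3 - 1) * sizeof(chain->link[0])
 *
 * and MAX2(length, 1) keeps a zero-length chain from underflowing the
 * subtraction.
 */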

static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_nir_deref_pointer_dereference(struct vtn_builder *b,
                                  struct vtn_pointer *base,
                                  struct vtn_access_chain *deref_chain)
{
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   nir_deref_instr *tail;
   if (base->deref) {
      tail = base->deref;
   } else {
      assert(base->var && base->var->var);
      tail = nir_build_deref_var(&b->nb, base->var->var);
   }

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   for (unsigned i = 0; i < deref_chain->length; i++) {
      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = deref_chain->link[i].id;
         tail = nir_build_deref_struct(&b->nb, tail, idx);
         type = type->members[idx];
      } else {
         nir_ssa_def *index;
         if (deref_chain->link[i].mode == vtn_access_mode_literal) {
            index = nir_imm_int(&b->nb, deref_chain->link[i].id);
         } else {
            vtn_assert(deref_chain->link[i].mode == vtn_access_mode_id);
            index = vtn_ssa_value(b, deref_chain->link[i].id)->def;
         }
         tail = nir_build_deref_array(&b->nb, tail, index);
         type = type->array_element;
      }

      access |= type->access;
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->deref = tail;
   ptr->access = access;

   return ptr;
}
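
/* Illustrative example for vtn_nir_deref_pointer_dereference() above,
 * assuming a hypothetical SPIR-V access chain into
 *
 *    struct S { float a; vec4 b[8]; };
 *    %p = OpAccessChain %ptr %var %c1 %i      ; &var.b[i]
 *
 * The literal link (1) becomes nir_build_deref_struct() and the dynamic
 * link (%i) becomes nir_build_deref_array(), yielding the NIR chain
 * deref_var -> deref_struct(1) -> deref_array(i).
 */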

static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_i2i32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_i2i32(&b->nb, src0);
      return nir_imul_imm(&b->nb, src0, stride);
   }
}
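
/* Illustrative results of vtn_access_link_as_ssa() above:
 *
 *    literal link 3, stride 16  -> nir_imm_int(&b->nb, 48)
 *    SSA link,       stride 1   -> the value itself (converted to 32 bit)
 *    SSA link,       stride 16  -> nir_imul_imm(&b->nb, value, 16)
 */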

static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;
   enum gl_access_qualifier access = base->access;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
               access |= type->access;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element's address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else if (base->mode == vtn_variable_mode_push_constant) {
         /* Push constants neither need nor have a block index */
         vtn_assert(!block_index);

         /* Start off at the start of the push constant block. */
         offset = nir_imm_int(&b->nb, 0);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT8:
      case GLSL_TYPE_INT8:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         access |= type->access;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
         type = type->members[member];
         access |= type->access;
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;
   ptr->access = access;

   return ptr;
}
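
/* Worked example (illustrative) for the offset walk above, assuming a
 * hypothetical std140 block
 *
 *    struct { vec4 a; float b[4]; }    offsets = { 0, 16 }, b stride = 16
 *
 * An access chain { literal 1, SSA i } starts at offset 0 and produces
 *
 *    offset = nir_iadd_imm(offset, 16);                 // member b
 *    offset = nir_iadd(offset, nir_imul_imm(i, 16));    // element b[i]
 */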

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_nir_deref_pointer_dereference(b, base, deref_chain);
   }
}

struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;
   pointer->access = var->access | var->type->access;

   return pointer;
}

/* Returns an atomic_uint type based on the original uint type.  The returned
 * type will be equivalent to the original one but will have an atomic_uint
 * type as leaf instead of a uint.
 *
 * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
 */
static const struct glsl_type *
repair_atomic_type(const struct glsl_type *type)
{
   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
   assert(glsl_type_is_scalar(glsl_without_array(type)));

   if (glsl_type_is_array(type)) {
      const struct glsl_type *atomic =
         repair_atomic_type(glsl_get_array_element(type));

      return glsl_array_type(atomic, glsl_get_length(type));
   } else {
      return glsl_atomic_uint_type();
   }
}
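
/* Illustrative mapping performed by repair_atomic_type() above:
 *
 *    uint       -> atomic_uint
 *    uint[4]    -> atomic_uint[4]
 *    uint[2][4] -> atomic_uint[2][4]
 */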

nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var && ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->deref) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
   }

   return ptr->deref;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                      struct vtn_ssa_value *inout)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      if (load) {
         inout->def = nir_load_deref(&b->nb, deref);
      } else {
         nir_store_deref(&b->nb, deref, inout->def, ~0);
      }
   } else if (glsl_type_is_array(deref->type) ||
              glsl_type_is_matrix(deref->type)) {
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child =
            nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_type_is_struct(deref->type));
      unsigned elems = glsl_get_length(deref->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
         _vtn_local_load_store(b, load, child, inout->elems[i]);
      }
   }
}

nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref_instr *
get_deref_tail(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return deref;

   nir_deref_instr *parent =
      nir_instr_as_deref(deref->parent.ssa->parent_instr);

   if (glsl_type_is_vector(parent->type))
      return parent;
   else
      return deref;
}
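
/* Example (illustrative): an OpAccessChain that ends in a vector
 * component, e.g. &v[2] on a vec4 v, shows up in NIR as
 *
 *    ... -> deref(vec4) -> deref_array(2)
 *
 * get_deref_tail() returns the vec4 deref so callers load or store the
 * whole vector and handle the component with an extract/insert instead.
 */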

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
   nir_deref_instr *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src_tail, val);

   if (src_tail != src) {
      val->type = src->type;
      if (nir_src_is_const(src->arr.index))
         val->def = vtn_vector_extract(b, val->def,
                                       nir_src_as_uint(src->arr.index));
      else
         val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
   }

   return val;
}

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_instr *dest)
{
   nir_deref_instr *dest_tail = get_deref_tail(dest);

   if (dest_tail != dest) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest_tail, val);

      if (nir_src_is_const(dest->arr.index))
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      nir_src_as_uint(dest->arr.index));
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              dest->arr.index.ssa);
      _vtn_local_load_store(b, false, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest_tail, src);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else {
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         return glsl_get_vector_elements(type->type) * type_size;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
      return 0;
   }
}
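
/* Worked example (illustrative) for vtn_type_block_size() above with a
 * hypothetical std140-laid-out block
 *
 *    struct { float f; vec3 v; float a[3]; }
 *    offsets = { 0, 16, 32 }, array stride = 16
 *
 * The struct case returns the largest field end:
 *
 *    MAX2(0 + 4, MAX2(16 + 12, 32 + 3 * 16)) = 80 bytes
 */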

static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type,
                     enum gl_access_qualifier access)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   /* Booleans usually shouldn't show up in external memory in SPIR-V.
    * However, they do for certain older GLSLang versions and can for shared
    * memory when we lower access chains internally.
    */
   const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
                                  glsl_get_bit_size(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (op == nir_intrinsic_load_ssbo ||
       op == nir_intrinsic_store_ssbo) {
      nir_intrinsic_set_access(instr, access);
   }

   /* With extensions like relaxed_block_layout, we really can't guarantee
    * much more than scalar alignment.
    */
   if (op != nir_intrinsic_load_push_constant)
      nir_intrinsic_set_align(instr, data_bit_size / 8, 0);

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data.
       */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components, data_bit_size, NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_type *type, enum gl_access_qualifier access,
                      struct vtn_ssa_value **inout)
{
   if (load && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd_imm(&b->nb, offset, i * col_stride);
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width),
                                 type->access | access);
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type,
                                 type->access | access);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd_imm(&b->nb, offset, i * type->stride);
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type),
                                    type->access | access);
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, i * type->stride);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->array_element,
                               type->array_element->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd_imm(&b->nb, offset, type->offsets[i]);
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               type->members[i],
                               type->members[i]->access | access,
                               &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}
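
/* Illustrative sketch of the row-major matrix path above: loading a
 * row-major mat3x2 (3 columns, 2 rows) issues num_ops = 2 loads of
 * vec_width = 3 components each (one per row, stepping by the row
 * stride), collects them in a transposed mat2x3 temporary, and then
 * vtn_ssa_transpose() swaps the result back into column-major shape.
 * Stores go the other way: transpose first, then write the rows.
 */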

static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      access_size = b->shader->num_uniforms;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, src, &index);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->type, src->access, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   offset = vtn_pointer_to_offset(b, dst, &index);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->type, dst->access, &src);
}

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

static void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexId:
   case SpvBuiltInVertexIndex:
      /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
       * allow VertexId.  The ARB_gl_spirv spec defines VertexId to be the
       * same as gl_VertexID, which is non-zero-based, and removes
       * VertexIndex.  Since they're both defined to be non-zero-based, we use
       * SYSTEM_VALUE_VERTEX_ID for both.
       */
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->options && b->options->caps.shader_viewport_index_layer &&
               (b->shader->info.stage == MESA_SHADER_VERTEX ||
                b->shader->info.stage == MESA_SHADER_TESS_EVAL))
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      *location = SYSTEM_VALUE_LOCAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
       */
      *location = SYSTEM_VALUE_FIRST_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupSize:
      *location = SYSTEM_VALUE_SUBGROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupId:
      *location = SYSTEM_VALUE_SUBGROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLocalInvocationId:
      *location = SYSTEM_VALUE_SUBGROUP_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumSubgroups:
      *location = SYSTEM_VALUE_NUM_SUBGROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDeviceIndex:
      *location = SYSTEM_VALUE_DEVICE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGeMask:
      *location = SYSTEM_VALUE_SUBGROUP_GE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupGtMask:
      *location = SYSTEM_VALUE_SUBGROUP_GT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLeMask:
      *location = SYSTEM_VALUE_SUBGROUP_LE_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSubgroupLtMask:
      *location = SYSTEM_VALUE_SUBGROUP_LT_MASK;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragStencilRefEXT:
      *location = FRAG_RESULT_STENCIL;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInWorkDim:
      *location = SYSTEM_VALUE_WORK_DIM;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalSize:
      *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin: %u", builtin);
   }
}

static void
apply_var_decoration(struct vtn_builder *b,
                     struct nir_variable_data *var_data,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      var_data->interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var_data->interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      var_data->centroid = true;
      break;
   case SpvDecorationSample:
      var_data->sample = true;
      break;
   case SpvDecorationInvariant:
      var_data->invariant = true;
      break;
   case SpvDecorationConstant:
      var_data->read_only = true;
      break;
   case SpvDecorationNonReadable:
      var_data->image.access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationNonWritable:
      var_data->read_only = true;
      var_data->image.access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationRestrict:
      var_data->image.access |= ACCESS_RESTRICT;
      break;
   case SpvDecorationVolatile:
      var_data->image.access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      var_data->image.access |= ACCESS_COHERENT;
      break;
   case SpvDecorationComponent:
      var_data->location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var_data->index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode = var_data->mode;
      vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
      var_data->mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         var_data->compact = true;
         break;
      case SpvBuiltInFragCoord:
         var_data->pixel_center_integer = b->pixel_center_integer;
         /* fallthrough */
      case SpvBuiltInSamplePosition:
         var_data->origin_upper_left = b->origin_upper_left;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationAliased:
   case SpvDecorationUniform:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      var_data->patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
      var_data->explicit_xfb_buffer = true;
      var_data->xfb_buffer = dec->literals[0];
      var_data->always_active_io = true;
      break;
   case SpvDecorationXfbStride:
      var_data->explicit_xfb_stride = true;
      var_data->xfb_stride = dec->literals[0];
      break;
   case SpvDecorationOffset:
      var_data->explicit_offset = true;
      var_data->offset = dec->literals[0];
      break;

   case SpvDecorationStream:
      var_data->stream = dec->literals[0];
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationHlslSemanticGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      vtn_var->explicit_binding = true;
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   case SpvDecorationOffset:
      vtn_var->offset = dec->literals[0];
      break;
   case SpvDecorationNonWritable:
      vtn_var->access |= ACCESS_NON_WRITEABLE;
      break;
   case SpvDecorationNonReadable:
      vtn_var->access |= ACCESS_NON_READABLE;
      break;
   case SpvDecorationVolatile:
      vtn_var->access |= ACCESS_VOLATILE;
      break;
   case SpvDecorationCoherent:
      vtn_var->access |= ACCESS_COHERENT;
      break;
   case SpvDecorationHlslCounterBufferGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */
      return;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      assert(val->pointer->var == void_var);
      assert(member == -1);
   } else {
      assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input = false;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else if (vtn_var->mode != vtn_variable_mode_uniform) {
         vtn_warn("Location must be on input, output, uniform, sampler or "
                  "image variable");
         return;
      }

      if (vtn_var->var->num_members == 0) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         assert(vtn_var->var->members);
         for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
            vtn_var->var->members[i].location = location;
            const struct glsl_type *member_type =
               glsl_get_struct_field(vtn_var->var->interface_type, i);
            location += glsl_count_attribute_slots(member_type,
                                                   is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         if (vtn_var->var->num_members == 0) {
            assert(member == -1);
            apply_var_decoration(b, &vtn_var->var->data, dec);
         } else if (member >= 0) {
            /* Member decorations must come from a type */
            assert(val->value_type == vtn_value_type_type);
            apply_var_decoration(b, &vtn_var->var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, &vtn_var->var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant ||
                    (vtn_var->mode == vtn_variable_mode_workgroup &&
                     b->options->lower_workgroup_access_to_offsets));
      }
   }
}
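
/* Illustrative example of the Location accumulation above: for a split
 * output struct { vec4 color; float fog; } decorated with Location 2,
 * member 0 lands on VARYING_SLOT_VAR0 + 2 and member 1 on the following
 * slot, since glsl_count_attribute_slots() reports one slot per member
 * here.
 */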

static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         /* Default-block uniforms, coming from gl_spirv */
         mode = vtn_variable_mode_uniform;
         nir_mode = nir_var_uniform;
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassAtomicCounter:
      mode = vtn_variable_mode_uniform;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer needs to have a pointer type with actual storage */
      vtn_assert(ptr->ptr_type);
      vtn_assert(ptr->ptr_type->type);

      if (!ptr->offset) {
         /* If we don't have an offset then we must be a pointer to the
          * variable itself.
          */
         vtn_assert(!ptr->offset && !ptr->block_index);

         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }

      vtn_assert(ptr->offset);
      if (ptr->block_index) {
         vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                    ptr->mode == vtn_variable_mode_ssbo);
         return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
         return ptr->offset;
      }
   } else {
      return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
   }
}

struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type)
{
   vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);

   struct vtn_type *interface_type = ptr_type->deref;
   while (interface_type->base_type == vtn_base_type_array)
      interface_type = interface_type->array_element;

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   nir_variable_mode nir_mode;
   ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
                                         interface_type, &nir_mode);
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;

   if (ptr->mode == vtn_variable_mode_ubo ||
       ptr->mode == vtn_variable_mode_ssbo) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      vtn_assert(ssa->num_components == 2);
      ptr->block_index = nir_channel(&b->nb, ssa, 0);
      ptr->offset = nir_channel(&b->nb, ssa, 1);
   } else if ((ptr->mode == vtn_variable_mode_workgroup &&
               b->options->lower_workgroup_access_to_offsets) ||
              ptr->mode == vtn_variable_mode_push_constant) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      vtn_assert(ssa->num_components == 1);
      ptr->block_index = NULL;
      ptr->offset = ssa;
   } else {
      ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                        ptr_type->deref->type);
   }

   return ptr;
}

static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
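
/* Example (illustrative): a tessellation-control or geometry input
 * declared in SPIR-V as an array over vertices, e.g. vec4 pos[3] for
 * triangles, is per-vertex; vtn_create_variable() below then uses the
 * array element (vec4) as the interface type for decorations rather
 * than the array itself.
 */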

static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
      if (glsl_type_is_image(without_array->type))
         b->shader->info.num_images++;
      else if (glsl_type_is_sampler(without_array->type))
         b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;
   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_local:
   case vtn_variable_mode_global:
   case vtn_variable_mode_uniform:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);

      /* Need to tweak the nir type here as at vtn_handle_type we don't have
       * access to storage_class, which is what tells us that this is
       * an atomic uint.
       */
      if (storage_class == SpvStorageClassAtomicCounter) {
         var->var->type = repair_atomic_type(var->type->type);
      } else {
         var->var->type = var->type->type;
      }
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, vtn_value(b, without_array->id,
                                             vtn_value_type_type),
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      struct vtn_type *interface_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         interface_type = var->type->array_element;
      }

      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = var->type->type;
      var->var->interface_type = interface_type->type;
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;

      if (glsl_type_is_struct(interface_type->type)) {
         /* It's a struct.  Set it up as per-member. */
         var->var->num_members = glsl_get_length(interface_type->type);
         var->var->members = rzalloc_array(var->var, struct nir_variable_data,
                                           var->var->num_members);

         for (unsigned i = 0; i < var->var->num_members; i++) {
            var->var->members[i].mode = nir_mode;
            var->var->members[i].patch = var->patch;
         }
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the interface type.
       */
      vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
                                          vtn_value_type_type),
                             var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
      /* These don't need actual variables. */
      break;
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_uniform) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.explicit_binding = var->explicit_binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

      if (glsl_type_is_image(without_array->type))
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->var != NULL && var->var->members == NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

static void
vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
                       struct vtn_type *dst_type,
                       struct vtn_type *src_type)
{
   if (dst_type->id == src_type->id)
      return;

   if (vtn_types_compatible(b, dst_type, src_type)) {
      /* Early versions of GLSLang would re-emit types unnecessarily and you
       * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
       * mismatched source and destination types.
       *
       * https://github.com/KhronosGroup/glslang/issues/304
       * https://github.com/KhronosGroup/glslang/issues/307
       * https://bugs.freedesktop.org/show_bug.cgi?id=104338
       * https://bugs.freedesktop.org/show_bug.cgi?id=104424
       */
      vtn_warn("Source and destination types of %s do not have the same "
               "ID (but are compatible): %u vs %u",
               spirv_op_to_string(opcode), dst_type->id, src_type->id);
      return;
   }

   vtn_fail("Source and destination types of %s do not match: %s vs. %s",
            spirv_op_to_string(opcode),
            glsl_get_type_name(dst_type->type),
            glsl_get_type_name(src_type->type));
}

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_assert_types_equal(b, opcode, dest->type->deref, src->type->deref);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
      struct vtn_pointer *src = src_val->pointer;

      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

      if (glsl_type_is_image(res_type->type) ||
          glsl_type_is_sampler(res_type->type)) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_value *dest_val = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_pointer *dest = dest_val->pointer;
      struct vtn_value *src_val = vtn_untyped_value(b, w[2]);

      /* OpStore requires us to actually have a storage type */
      vtn_fail_if(dest->type->type == NULL,
                  "Invalid destination type for OpStore");

      if (glsl_get_base_type(dest->type->type) == GLSL_TYPE_BOOL &&
          glsl_get_base_type(src_val->type->type) == GLSL_TYPE_UINT) {
         /* Early versions of GLSLang would use uint types for UBOs/SSBOs but
          * would then store them to a local variable as bool.  Work around
          * the issue by doing an implicit conversion.
          *
          * https://github.com/KhronosGroup/glslang/issues/170
          * https://bugs.freedesktop.org/show_bug.cgi?id=104424
          */
         vtn_warn("OpStore of value of type OpTypeInt to a pointer to type "
                  "OpTypeBool.  Doing an implicit conversion to work around "
                  "the problem.");
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
         vtn_variable_store(b, bool_ssa, dest);
         break;
      }

      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to work around the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}
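
/* Worked example (illustrative) of the OpArrayLength computation above:
 * if get_buffer_size returns 256 and the runtime array sits at offset 16
 * with stride 16, the result is max(256 - 16, 0) / 16 = 15 elements.
 */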