/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "spirv_info.h"

static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
   struct vtn_access_chain *chain;

   /* Subtract 1 from the length since there's already one built in */
   size_t size = sizeof(*chain) +
                 (MAX2(length, 1) - 1) * sizeof(chain->link[0]);
   chain = rzalloc_size(b, size);
   chain->length = length;

   return chain;
}

static struct vtn_access_chain *
vtn_access_chain_extend(struct vtn_builder *b, struct vtn_access_chain *old,
                        unsigned new_ids)
{
   struct vtn_access_chain *chain;

   unsigned old_len = old ? old->length : 0;
   chain = vtn_access_chain_create(b, old_len + new_ids);

   for (unsigned i = 0; i < old_len; i++)
      chain->link[i] = old->link[i];

   return chain;
}

static bool
vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
                            struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_ssbo ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}

static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
{
   return ptr->mode == vtn_variable_mode_ssbo ||
          ptr->mode == vtn_variable_mode_ubo ||
          ptr->mode == vtn_variable_mode_push_constant ||
          (ptr->mode == vtn_variable_mode_workgroup &&
           b->options->lower_workgroup_access_to_offsets);
}
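
/* Pointers in this file come in two flavors: "deref" pointers, which carry
 * a vtn_access_chain and eventually become NIR variable derefs, and
 * "offset" pointers, which carry an SSA block index and/or byte offset
 * (UBOs, SSBOs, push constants, and workgroup memory when it is lowered to
 * offsets).  The two helpers above decide which representation a given
 * pointer uses.
 */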

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_access_chain_pointer_dereference(struct vtn_builder *b,
                                     struct vtn_pointer *base,
                                     struct vtn_access_chain *deref_chain)
{
   struct vtn_access_chain *chain =
      vtn_access_chain_extend(b, base->chain, deref_chain->length);
   struct vtn_type *type = base->type;

   /* OpPtrAccessChain is only allowed on things which support variable
    * pointers.  For everything else, the client is expected to just pass us
    * the right access chain.
    */
   vtn_assert(!deref_chain->ptr_as_array);

   unsigned start = base->chain ? base->chain->length : 0;
   for (unsigned i = 0; i < deref_chain->length; i++) {
      chain->link[start + i] = deref_chain->link[i];

      if (glsl_type_is_struct(type->type)) {
         vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal);
         type = type->members[deref_chain->link[i].id];
      } else {
         type = type->array_element;
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->var = base->var;
   ptr->chain = chain;

   return ptr;
}

static nir_ssa_def *
vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link,
                       unsigned stride)
{
   vtn_assert(stride > 0);
   if (link.mode == vtn_access_mode_literal) {
      return nir_imm_int(&b->nb, link.id * stride);
   } else if (stride == 1) {
      nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
      if (ssa->bit_size != 32)
         ssa = nir_u2u32(&b->nb, ssa);
      return ssa;
   } else {
      nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
      if (src0->bit_size != 32)
         src0 = nir_u2u32(&b->nb, src0);
      return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
   }
}
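
/* For example, a literal link with id 3 and a stride of 16 folds to
 * nir_imm_int(&b->nb, 48), while an SSA link is converted to 32 bits if
 * needed and multiplied by the stride (the multiply is skipped entirely
 * when the stride is 1).
 */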

static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
                            nir_ssa_def *desc_array_index)
{
   if (!desc_array_index) {
      vtn_assert(glsl_type_is_struct(var->type->type));
      desc_array_index = nir_imm_int(&b->nb, 0);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_index);
   instr->src[0] = nir_src_for_ssa(desc_array_index);
   nir_intrinsic_set_desc_set(instr, var->descriptor_set);
   nir_intrinsic_set_binding(instr, var->binding);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static nir_ssa_def *
vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
                     nir_ssa_def *offset_index)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->nb.shader,
                                 nir_intrinsic_vulkan_resource_reindex);
   instr->src[0] = nir_src_for_ssa(base_index);
   instr->src[1] = nir_src_for_ssa(offset_index);

   nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
   nir_builder_instr_insert(&b->nb, &instr->instr);

   return &instr->dest.ssa;
}

static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                                   struct vtn_pointer *base,
                                   struct vtn_access_chain *deref_chain)
{
   nir_ssa_def *block_index = base->block_index;
   nir_ssa_def *offset = base->offset;
   struct vtn_type *type = base->type;

   unsigned idx = 0;
   if (base->mode == vtn_variable_mode_ubo ||
       base->mode == vtn_variable_mode_ssbo) {
      if (!block_index) {
         vtn_assert(base->var && base->type);
         nir_ssa_def *desc_arr_idx;
         if (glsl_type_is_array(type->type)) {
            if (deref_chain->length >= 1) {
               desc_arr_idx =
                  vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
               idx++;
               /* This consumes a level of type */
               type = type->array_element;
            } else {
               /* This is annoying.  We've been asked for a pointer to the
                * array of UBOs/SSBOs and not a specific buffer.  Return a
                * pointer with a descriptor index of 0 and we'll have to do
                * a reindex later to adjust it to the right thing.
                */
               desc_arr_idx = nir_imm_int(&b->nb, 0);
            }
         } else if (deref_chain->ptr_as_array) {
            /* You can't have a zero-length OpPtrAccessChain */
            vtn_assert(deref_chain->length >= 1);
            desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
            idx++;
         } else {
            /* We have a regular non-array SSBO. */
            desc_arr_idx = NULL;
         }
         block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
      } else if (deref_chain->ptr_as_array &&
                 type->base_type == vtn_base_type_struct && type->block) {
         /* We are doing an OpPtrAccessChain on a pointer to a struct that is
          * decorated block.  This is an interesting corner in the SPIR-V
          * spec.  One interpretation would be that the client is clearly
          * trying to treat that block as if it's an implicit array of blocks
          * repeated in the buffer.  However, the SPIR-V spec for the
          * OpPtrAccessChain says:
          *
          *    "Base is treated as the address of the first element of an
          *    array, and the Element element’s address is computed to be the
          *    base for the Indexes, as per OpAccessChain."
          *
          * Taken literally, that would mean that your struct type is supposed
          * to be treated as an array of such a struct and, since it's
          * decorated block, that means an array of blocks which corresponds
          * to an array descriptor.  Therefore, we need to do a reindex
          * operation to add the index from the first link in the access chain
          * to the index we received.
          *
          * The downside to this interpretation (there always is one) is that
          * this might be somewhat surprising behavior to apps if they expect
          * the implicit array behavior described above.
          */
         vtn_assert(deref_chain->length >= 1);
         nir_ssa_def *offset_index =
            vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
         idx++;

         block_index = vtn_resource_reindex(b, block_index, offset_index);
      }
   }

   if (!offset) {
      if (base->mode == vtn_variable_mode_workgroup) {
         /* SLM doesn't need nor have a block index */
         vtn_assert(!block_index);

         /* We need the variable for the base offset */
         vtn_assert(base->var);

         /* We need ptr_type for size and alignment */
         vtn_assert(base->ptr_type);

         /* Assign location on first use so that we don't end up bloating SLM
          * address space for variables which are never statically used.
          */
         if (base->var->shared_location < 0) {
            vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
            b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
                                                  base->ptr_type->align);
            base->var->shared_location = b->shader->num_shared;
            b->shader->num_shared += base->ptr_type->length;
         }

         offset = nir_imm_int(&b->nb, base->var->shared_location);
      } else {
         /* The code above should have ensured a block_index when needed. */
         vtn_assert(block_index);

         /* Start off at the start of the buffer. */
         offset = nir_imm_int(&b->nb, 0);
      }
   }

   if (deref_chain->ptr_as_array && idx == 0) {
      /* We need ptr_type for the stride */
      vtn_assert(base->ptr_type);

      /* We need at least one element in the chain */
      vtn_assert(deref_chain->length >= 1);

      nir_ssa_def *elem_offset =
         vtn_access_link_as_ssa(b, deref_chain->link[idx],
                                base->ptr_type->stride);
      offset = nir_iadd(&b->nb, offset, elem_offset);
      idx++;
   }

   for (; idx < deref_chain->length; idx++) {
      switch (glsl_get_base_type(type->type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         nir_ssa_def *elem_offset =
            vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
         offset = nir_iadd(&b->nb, offset, elem_offset);
         type = type->array_element;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = deref_chain->link[idx].id;
         nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
         offset = nir_iadd(&b->nb, offset, mem_offset);
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
   ptr->mode = base->mode;
   ptr->type = type;
   ptr->block_index = block_index;
   ptr->offset = offset;

   return ptr;
}
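
/* As an illustrative example (the layout numbers here are hypothetical):
 * for "struct { vec4 a; float b[8]; }" with b at offset 16 and an array
 * stride of 4, dereferencing member 1, element i yields
 * offset = base + 16 + i * 4; the member offset folds to an immediate and
 * the element index goes through vtn_access_link_as_ssa() above.
 */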

/* Dereference the given base pointer by the access chain */
static struct vtn_pointer *
vtn_pointer_dereference(struct vtn_builder *b,
                        struct vtn_pointer *base,
                        struct vtn_access_chain *deref_chain)
{
   if (vtn_pointer_uses_ssa_offset(b, base)) {
      return vtn_ssa_offset_pointer_dereference(b, base, deref_chain);
   } else {
      return vtn_access_chain_pointer_dereference(b, base, deref_chain);
   }
}

/* Crawls a chain of array derefs and rewrites the types so that the
 * lengths stay the same but the terminal type is the one given by
 * tail_type.  This is useful for split structures.
 */
static void
rewrite_deref_types(struct vtn_builder *b, nir_deref *deref,
                    const struct glsl_type *type)
{
   deref->type = type;
   if (deref->child) {
      vtn_assert(deref->child->deref_type == nir_deref_type_array);
      vtn_assert(glsl_type_is_array(deref->type));
      rewrite_deref_types(b, deref->child, glsl_get_array_element(type));
   }
}

struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
                         struct vtn_variable *var, struct vtn_type *ptr_type)
{
   struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);

   pointer->mode = var->mode;
   pointer->type = var->type;
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   vtn_assert(ptr_type->deref->type == var->type->type);
   pointer->ptr_type = ptr_type;
   pointer->var = var;

   return pointer;
}

nir_deref_var *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* Do on-the-fly copy propagation for samplers. */
   if (ptr->var->copy_prop_sampler)
      return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);

   nir_deref_var *deref_var;
   if (ptr->var->var) {
      deref_var = nir_deref_var_create(b, ptr->var->var);
      /* Raw variable access */
      if (!ptr->chain)
         return deref_var;
   } else {
      vtn_assert(ptr->var->members);
      /* Create the deref_var manually.  It will get filled out later. */
      deref_var = rzalloc(b, nir_deref_var);
      deref_var->deref.deref_type = nir_deref_type_var;
   }

   struct vtn_access_chain *chain = ptr->chain;
   vtn_assert(chain);

   struct vtn_type *deref_type = ptr->var->type;
   nir_deref *tail = &deref_var->deref;
   nir_variable **members = ptr->var->members;

   for (unsigned i = 0; i < chain->length; i++) {
      enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY: {
         deref_type = deref_type->array_element;

         nir_deref_array *deref_arr = nir_deref_array_create(b);
         deref_arr->deref.type = deref_type->type;

         if (chain->link[i].mode == vtn_access_mode_literal) {
            deref_arr->deref_array_type = nir_deref_array_type_direct;
            deref_arr->base_offset = chain->link[i].id;
         } else {
            vtn_assert(chain->link[i].mode == vtn_access_mode_id);
            deref_arr->deref_array_type = nir_deref_array_type_indirect;
            deref_arr->base_offset = 0;
            deref_arr->indirect =
               nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
         }
         tail->child = &deref_arr->deref;
         tail = tail->child;
         break;
      }

      case GLSL_TYPE_STRUCT: {
         vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
         unsigned idx = chain->link[i].id;
         deref_type = deref_type->members[idx];
         if (members) {
            /* This is a pre-split structure. */
            deref_var->var = members[idx];
            rewrite_deref_types(b, &deref_var->deref, members[idx]->type);
            vtn_assert(tail->type == deref_type->type);
            members = NULL;
         } else {
            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
            deref_struct->deref.type = deref_type->type;
            tail->child = &deref_struct->deref;
            tail = tail->child;
         }
         break;
      }
      default:
         vtn_fail("Invalid type for deref");
      }
   }

   vtn_assert(members == NULL);
   return deref_var;
}

static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
                      nir_deref *tail, struct vtn_ssa_value *inout)
{
   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = tail->child;

   if (glsl_type_is_vector_or_scalar(tail->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      tail->child = NULL;

      nir_intrinsic_op op = load ? nir_intrinsic_load_var :
                                   nir_intrinsic_store_var;

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
      intrin->variables[0] = nir_deref_var_clone(deref, intrin);
      intrin->num_components = glsl_get_vector_elements(tail->type);

      if (load) {
         nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                           intrin->num_components,
                           glsl_get_bit_size(tail->type),
                           NULL);
         inout->def = &intrin->dest.ssa;
      } else {
         nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
         intrin->src[0] = nir_src_for_ssa(inout->def);
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
   } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(tail->type)) {
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_array *deref_arr = nir_deref_array_create(b);
      deref_arr->deref_array_type = nir_deref_array_type_direct;
      deref_arr->deref.type = glsl_get_array_element(tail->type);
      tail->child = &deref_arr->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_arr->base_offset = i;
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   } else {
      vtn_assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(tail->type);
      nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
      tail->child = &deref_struct->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref_struct->index = i;
         deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
         _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
      }
   }

   tail->child = old_child;
}
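
/* This recursion bottoms out at vectors and scalars: an aggregate such as
 * a matrix or an array of vec4s is loaded or stored as one load_var or
 * store_var per vector, with the deref chain temporarily extended to point
 * at each element in turn and restored afterwards.
 */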

nir_deref_var *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
   struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
   return vtn_pointer_to_deref(b, ptr);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref *
get_deref_tail(nir_deref_var *deref)
{
   nir_deref *cur = &deref->deref;
   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
      cur = cur->child;

   return cur;
}

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
{
   nir_deref *src_tail = get_deref_tail(src);
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
   _vtn_local_load_store(b, true, src, src_tail, val);

   if (src_tail->child) {
      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
      vtn_assert(vec_deref->deref.child == NULL);
      val->type = vec_deref->deref.type;
      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def,
                                               vec_deref->indirect.ssa);
   }

   return val;
}
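
/* A SPIR-V OpAccessChain may index all the way into a single component of
 * a vector; in that case the NIR deref tail has one extra array link and
 * the loaded vector is narrowed here with vtn_vector_extract (or its
 * dynamic variant for non-constant indices).
 */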

void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                nir_deref_var *dest)
{
   nir_deref *dest_tail = get_deref_tail(dest);

   if (dest_tail->child) {
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
      _vtn_local_load_store(b, true, dest, dest_tail, val);
      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
      vtn_assert(deref->deref.child == NULL);
      if (deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      deref->base_offset);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              deref->indirect.ssa);
      _vtn_local_load_store(b, false, dest, dest_tail, val);
   } else {
      _vtn_local_load_store(b, false, dest, dest_tail, src);
   }
}

nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out, unsigned *end_idx_out)
{
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      if (!ptr->offset) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
      }
      *index_out = ptr->block_index;
      return ptr->offset;
   }

   vtn_assert(ptr->mode == vtn_variable_mode_push_constant);
   *index_out = NULL;

   unsigned idx = 0;
   struct vtn_type *type = ptr->var->type;
   nir_ssa_def *offset = nir_imm_int(&b->nb, 0);
   for (; idx < ptr->chain->length; idx++) {
      enum glsl_base_type base_type = glsl_get_base_type(type->type);
      switch (base_type) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT16:
      case GLSL_TYPE_INT16:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_FLOAT16:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_ARRAY:
         offset = nir_iadd(&b->nb, offset,
                           vtn_access_link_as_ssa(b, ptr->chain->link[idx],
                                                  type->stride));

         type = type->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         vtn_assert(ptr->chain->link[idx].mode == vtn_access_mode_literal);
         unsigned member = ptr->chain->link[idx].id;
         offset = nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, type->offsets[member]));
         type = type->members[member];
         break;
      }

      default:
         vtn_fail("Invalid type for deref");
      }
   }

   vtn_assert(type == ptr->type);
   if (end_idx_out)
      *end_idx_out = idx;

   return offset;
}

/* Tries to compute the size of an interface block based on the strides and
 * offsets that are provided to us in the SPIR-V source.
 */
static unsigned
vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type)
{
   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE: {
      unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
                                        glsl_get_matrix_columns(type->type);
      if (cols > 1) {
         vtn_assert(type->stride > 0);
         return type->stride * cols;
      } else if (base_type == GLSL_TYPE_DOUBLE ||
                 base_type == GLSL_TYPE_UINT64 ||
                 base_type == GLSL_TYPE_INT64) {
         return glsl_get_vector_elements(type->type) * 8;
      } else {
         return glsl_get_vector_elements(type->type) * 4;
      }
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      unsigned num_fields = glsl_get_length(type->type);
      for (unsigned f = 0; f < num_fields; f++) {
         unsigned field_end = type->offsets[f] +
                              vtn_type_block_size(b, type->members[f]);
         size = MAX2(size, field_end);
      }
      return size;
   }

   case GLSL_TYPE_ARRAY:
      vtn_assert(type->stride > 0);
      vtn_assert(glsl_get_length(type->type) > 0);
      return type->stride * glsl_get_length(type->type);

   default:
      vtn_fail("Invalid block type");
   }
}
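
/* For illustration (the layout here is hypothetical): a block containing a
 * vec3 at offset 0 and a float[4] at offset 16 with a stride of 4 has size
 * MAX2(0 + 12, 16 + 4 * 4) = 32 bytes; the struct case simply takes the
 * maximum field end over all members.
 */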

static void
vtn_access_chain_get_offset_size(struct vtn_builder *b,
                                 struct vtn_access_chain *chain,
                                 struct vtn_type *type,
                                 unsigned *access_offset,
                                 unsigned *access_size)
{
   *access_offset = 0;

   for (unsigned i = 0; i < chain->length; i++) {
      if (chain->link[i].mode != vtn_access_mode_literal)
         break;

      if (glsl_type_is_struct(type->type)) {
         *access_offset += type->offsets[chain->link[i].id];
         type = type->members[chain->link[i].id];
      } else {
         *access_offset += type->stride * chain->link[i].id;
         type = type->array_element;
      }
   }

   *access_size = vtn_type_block_size(b, type);
}
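
/* Only the leading run of literal links contributes to the constant
 * offset; the walk stops at the first SSA index and the remaining type is
 * sized conservatively with vtn_type_block_size().  This yields the
 * base/range pair used for push constant loads below.
 */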

static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                     nir_ssa_def *index, nir_ssa_def *offset,
                     unsigned access_offset, unsigned access_size,
                     struct vtn_ssa_value **inout, const struct glsl_type *type)
{
   nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
   instr->num_components = glsl_get_vector_elements(type);

   int src = 0;
   if (!load) {
      nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
      instr->src[src++] = nir_src_for_ssa((*inout)->def);
   }

   if (op == nir_intrinsic_load_push_constant) {
      vtn_assert(access_offset % 4 == 0);

      nir_intrinsic_set_base(instr, access_offset);
      nir_intrinsic_set_range(instr, access_size);
   }

   if (index)
      instr->src[src++] = nir_src_for_ssa(index);

   if (op == nir_intrinsic_load_push_constant) {
      /* We need to subtract the offset from where the intrinsic will load the
       * data. */
      instr->src[src++] =
         nir_src_for_ssa(nir_isub(&b->nb, offset,
                                  nir_imm_int(&b->nb, access_offset)));
   } else {
      instr->src[src++] = nir_src_for_ssa(offset);
   }

   if (load) {
      nir_ssa_dest_init(&instr->instr, &instr->dest,
                        instr->num_components,
                        glsl_get_bit_size(type), NULL);
      (*inout)->def = &instr->dest.ssa;
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   if (load && glsl_get_base_type(type) == GLSL_TYPE_BOOL)
      (*inout)->def = nir_ine(&b->nb, (*inout)->def, nir_imm_int(&b->nb, 0));
}

static void
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
                      struct vtn_access_chain *chain, unsigned chain_idx,
                      struct vtn_type *type, struct vtn_ssa_value **inout)
{
   if (chain && chain_idx >= chain->length)
      chain = NULL;

   if (load && chain == NULL && *inout == NULL)
      *inout = vtn_create_ssa_value(b, type->type);

   enum glsl_base_type base_type = glsl_get_base_type(type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* This is where things get interesting.  At this point, we've hit
       * a vector, a scalar, or a matrix.
       */
      if (glsl_type_is_matrix(type->type)) {
         /* Loading the whole matrix */
         struct vtn_ssa_value *transpose;
         unsigned num_ops, vec_width, col_stride;
         if (type->row_major) {
            num_ops = glsl_get_vector_elements(type->type);
            vec_width = glsl_get_matrix_columns(type->type);
            col_stride = type->array_element->stride;
            if (load) {
               const struct glsl_type *transpose_type =
                  glsl_matrix_type(base_type, vec_width, num_ops);
               *inout = vtn_create_ssa_value(b, transpose_type);
            } else {
               transpose = vtn_ssa_transpose(b, *inout);
               inout = &transpose;
            }
         } else {
            num_ops = glsl_get_matrix_columns(type->type);
            vec_width = glsl_get_vector_elements(type->type);
            col_stride = type->stride;
         }

         for (unsigned i = 0; i < num_ops; i++) {
            nir_ssa_def *elem_offset =
               nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
            _vtn_load_store_tail(b, op, load, index, elem_offset,
                                 access_offset, access_size,
                                 &(*inout)->elems[i],
                                 glsl_vector_type(base_type, vec_width));
         }

         if (load && type->row_major)
            *inout = vtn_ssa_transpose(b, *inout);
      } else {
         unsigned elems = glsl_get_vector_elements(type->type);
         unsigned type_size = glsl_get_bit_size(type->type) / 8;
         if (elems == 1 || type->stride == type_size) {
            /* This is a tightly-packed normal scalar or vector load */
            vtn_assert(glsl_type_is_vector_or_scalar(type->type));
            _vtn_load_store_tail(b, op, load, index, offset,
                                 access_offset, access_size,
                                 inout, type->type);
         } else {
            /* This is a strided load.  We have to load N things separately.
             * This is the single column of a row-major matrix case.
             */
            vtn_assert(type->stride > type_size);
            vtn_assert(type->stride % type_size == 0);

            nir_ssa_def *per_comp[4];
            for (unsigned i = 0; i < elems; i++) {
               nir_ssa_def *elem_offset =
                  nir_iadd(&b->nb, offset,
                           nir_imm_int(&b->nb, i * type->stride));
               struct vtn_ssa_value *comp, temp_val;
               if (!load) {
                  temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
                  temp_val.type = glsl_scalar_type(base_type);
               }
               comp = &temp_val;
               _vtn_load_store_tail(b, op, load, index, elem_offset,
                                    access_offset, access_size,
                                    &comp, glsl_scalar_type(base_type));
               per_comp[i] = comp->def;
            }

            if (load) {
               if (*inout == NULL)
                  *inout = vtn_create_ssa_value(b, type->type);
               (*inout)->def = nir_vec(&b->nb, per_comp, elems);
            }
         }
      }
      return;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->array_element, &(*inout)->elems[i]);
      }
      return;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(type->type);
      for (unsigned i = 0; i < elems; i++) {
         nir_ssa_def *elem_off =
            nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
         _vtn_block_load_store(b, op, load, index, elem_off,
                               access_offset, access_size,
                               NULL, 0,
                               type->members[i], &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid block member type");
   }
}

static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   nir_intrinsic_op op;
   unsigned access_offset = 0, access_size = 0;
   switch (src->mode) {
   case vtn_variable_mode_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_load_ssbo;
      break;
   case vtn_variable_mode_push_constant:
      op = nir_intrinsic_load_push_constant;
      vtn_access_chain_get_offset_size(b, src->chain, src->var->type,
                                       &access_offset, &access_size);
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_load_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, src, &index, &chain_idx);

   struct vtn_ssa_value *value = NULL;
   _vtn_block_load_store(b, op, true, index, offset,
                         access_offset, access_size,
                         src->chain, chain_idx, src->type, &value);
   return value;
}

static void
vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                struct vtn_pointer *dst)
{
   nir_intrinsic_op op;
   switch (dst->mode) {
   case vtn_variable_mode_ssbo:
      op = nir_intrinsic_store_ssbo;
      break;
   case vtn_variable_mode_workgroup:
      op = nir_intrinsic_store_shared;
      break;
   default:
      vtn_fail("Invalid block variable mode");
   }

   nir_ssa_def *offset, *index = NULL;
   unsigned chain_idx;
   offset = vtn_pointer_to_offset(b, dst, &index, &chain_idx);

   _vtn_block_load_store(b, op, false, index, offset,
                         0, 0, dst->chain, chain_idx, dst->type, &src);
}

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      if (load) {
         *inout = vtn_local_load(b, vtn_pointer_to_deref(b, ptr));
      } else {
         vtn_local_store(b, *inout, vtn_pointer_to_deref(b, ptr));
      }
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(ptr->type->type);
      if (load) {
         vtn_assert(*inout == NULL);
         *inout = rzalloc(b, struct vtn_ssa_value);
         (*inout)->type = ptr->type->type;
         (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
      }

      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}
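
/* Aggregates are split one level at a time: a one-link literal access
 * chain selects each element or member in turn, vtn_pointer_dereference()
 * produces the element pointer, and the recursion terminates once a
 * scalar, vector, or matrix is reached and handed to the local load/store
 * path above.
 */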

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
{
   if (vtn_pointer_is_external_block(b, src)) {
      return vtn_block_load(b, src);
   } else {
      struct vtn_ssa_value *val = NULL;
      _vtn_variable_load_store(b, true, src, &val);
      return val;
   }
}

void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   struct vtn_pointer *dest)
{
   if (vtn_pointer_is_external_block(b, dest)) {
      vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                 dest->mode == vtn_variable_mode_workgroup);
      vtn_block_store(b, src, dest);
   } else {
      _vtn_variable_load_store(b, false, dest, &src);
   }
}

static void
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
   vtn_assert(src->type->type == dest->type->type);
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      /* At this point, we have a scalar, vector, or matrix so we know that
       * there cannot be any structure splitting still in the way.  By
       * stopping at the matrix level rather than the vector level, we
       * ensure that matrices get loaded in the optimal way even if they
       * are stored row-major in a UBO.
       */
      vtn_variable_store(b, vtn_variable_load(b, src), dest);
      return;

   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_STRUCT: {
      struct vtn_access_chain chain = {
         .length = 1,
         .link = {
            { .mode = vtn_access_mode_literal, },
         }
      };
      unsigned elems = glsl_get_length(src->type->type);
      for (unsigned i = 0; i < elems; i++) {
         chain.link[0].id = i;
         struct vtn_pointer *src_elem =
            vtn_pointer_dereference(b, src, &chain);
         struct vtn_pointer *dest_elem =
            vtn_pointer_dereference(b, dest, &chain);

         _vtn_variable_copy(b, dest_elem, src_elem);
      }
      return;
   }

   default:
      vtn_fail("Invalid access chain type");
   }
}

void
vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                  struct vtn_pointer *src)
{
   /* TODO: At some point, we should add a special-case for when we can
    * just emit a copy_var intrinsic.
    */
   _vtn_variable_copy(b, dest, src);
}

static void
set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode)
{
   vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
   *mode = nir_var_system_value;
}

static void
vtn_get_builtin_location(struct vtn_builder *b,
                         SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      break;
   case SpvBuiltInCullDistance:
      *location = VARYING_SLOT_CULL_DIST0;
      break;
   case SpvBuiltInVertexIndex:
      *location = SYSTEM_VALUE_VERTEX_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceIndex:
      *location = SYSTEM_VALUE_INSTANCE_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPrimitiveId:
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
         vtn_assert(*mode == nir_var_shader_in);
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else if (*mode == nir_var_shader_out) {
         *location = VARYING_SLOT_PRIMITIVE_ID;
      } else {
         *location = SYSTEM_VALUE_PRIMITIVE_ID;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else
         vtn_fail("invalid stage for SpvBuiltInLayer");
      break;
   case SpvBuiltInViewportIndex:
      *location = VARYING_SLOT_VIEWPORT;
      if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
         *mode = nir_var_shader_out;
      else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
         *mode = nir_var_shader_in;
      else
         vtn_fail("invalid stage for SpvBuiltInViewportIndex");
      break;
   case SpvBuiltInTessLevelOuter:
      *location = VARYING_SLOT_TESS_LEVEL_OUTER;
      break;
   case SpvBuiltInTessLevelInner:
      *location = VARYING_SLOT_TESS_LEVEL_INNER;
      break;
   case SpvBuiltInTessCoord:
      *location = SYSTEM_VALUE_TESS_COORD;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInPatchVertices:
      *location = SYSTEM_VALUE_VERTICES_IN;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      vtn_assert(*mode == nir_var_shader_in);
      break;
   case SpvBuiltInFrontFacing:
      *location = SYSTEM_VALUE_FRONT_FACE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInSampleMask:
      if (*mode == nir_var_shader_out) {
         *location = FRAG_RESULT_SAMPLE_MASK;
      } else {
         *location = SYSTEM_VALUE_SAMPLE_MASK_IN;
         set_mode_system_value(b, mode);
      }
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      vtn_assert(*mode == nir_var_shader_out);
      break;
   case SpvBuiltInHelperInvocation:
      *location = SYSTEM_VALUE_HELPER_INVOCATION;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInNumWorkgroups:
      *location = SYSTEM_VALUE_NUM_WORK_GROUPS;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInWorkgroupSize:
      /* This should already be handled */
      vtn_fail("unsupported builtin");
      break;
   case SpvBuiltInWorkgroupId:
      *location = SYSTEM_VALUE_WORK_GROUP_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationId:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInLocalInvocationIndex:
      *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInGlobalInvocationId:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseVertex:
      *location = SYSTEM_VALUE_BASE_VERTEX;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInBaseInstance:
      *location = SYSTEM_VALUE_BASE_INSTANCE;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInDrawIndex:
      *location = SYSTEM_VALUE_DRAW_ID;
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
      *location = SYSTEM_VALUE_VIEW_INDEX;
      set_mode_system_value(b, mode);
      break;
   default:
      vtn_fail("unsupported builtin");
   }
}

static void
apply_var_decoration(struct vtn_builder *b, nir_variable *nir_var,
                     const struct vtn_decoration *dec)
{
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      nir_var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      nir_var->data.interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      nir_var->data.centroid = true;
      break;
   case SpvDecorationSample:
      nir_var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      nir_var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      vtn_assert(nir_var->constant_initializer != NULL);
      nir_var->data.read_only = true;
      break;
   case SpvDecorationNonReadable:
      nir_var->data.image.write_only = true;
      break;
   case SpvDecorationNonWritable:
      nir_var->data.read_only = true;
      nir_var->data.image.read_only = true;
      break;
   case SpvDecorationComponent:
      nir_var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      nir_var->data.index = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      if (builtin == SpvBuiltInWorkgroupSize) {
         /* This shouldn't be a builtin.  It's actually a constant. */
         nir_var->data.mode = nir_var_global;
         nir_var->data.read_only = true;

         nir_constant *c = rzalloc(nir_var, nir_constant);
         c->values[0].u32[0] = b->shader->info.cs.local_size[0];
         c->values[0].u32[1] = b->shader->info.cs.local_size[1];
         c->values[0].u32[2] = b->shader->info.cs.local_size[2];
         nir_var->constant_initializer = c;
         break;
      }

      nir_variable_mode mode = nir_var->data.mode;
      vtn_get_builtin_location(b, builtin, &nir_var->data.location, &mode);
      nir_var->data.mode = mode;

      switch (builtin) {
      case SpvBuiltInTessLevelOuter:
      case SpvBuiltInTessLevelInner:
         nir_var->data.compact = true;
         break;
      case SpvBuiltInSamplePosition:
         nir_var->data.origin_upper_left = b->origin_upper_left;
         break;
      case SpvBuiltInFragCoord:
         nir_var->data.pixel_center_integer = b->pixel_center_integer;
         break;
      default:
         break;
      }

      break;
   }

   case SpvDecorationSpecId:
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationLinkageAttributes:
      break; /* Do nothing with these here */

   case SpvDecorationPatch:
      nir_var->data.patch = true;
      break;

   case SpvDecorationLocation:
      vtn_fail("Handled above");

   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      break; /* These can apply to a type but we don't care about them */

   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed for variable or structure member: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

static void
var_is_patch_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                const struct vtn_decoration *dec, void *out_is_patch)
{
   if (dec->decoration == SpvDecorationPatch) {
      *((bool *) out_is_patch) = true;
   }
}

static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   struct vtn_variable *vtn_var = void_var;

   /* Handle decorations that apply to a vtn_variable as a whole */
   switch (dec->decoration) {
   case SpvDecorationBinding:
      vtn_var->binding = dec->literals[0];
      return;
   case SpvDecorationDescriptorSet:
      vtn_var->descriptor_set = dec->literals[0];
      return;
   case SpvDecorationInputAttachmentIndex:
      vtn_var->input_attachment_index = dec->literals[0];
      return;
   case SpvDecorationPatch:
      vtn_var->patch = true;
      break;
   default:
      break;
   }

   if (val->value_type == vtn_value_type_pointer) {
      vtn_assert(val->pointer->var == void_var);
      vtn_assert(val->pointer->chain == NULL);
      vtn_assert(member == -1);
   } else {
      vtn_assert(val->value_type == vtn_value_type_type);
   }

   /* Location is odd.  If applied to a split structure, we have to walk the
    * whole thing and accumulate the location.  It's easier to handle as a
    * special case.
    */
   if (dec->decoration == SpvDecorationLocation) {
      unsigned location = dec->literals[0];
      bool is_vertex_input;
      if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
          vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += FRAG_RESULT_DATA0;
      } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
                 vtn_var->mode == vtn_variable_mode_input) {
         is_vertex_input = true;
         location += VERT_ATTRIB_GENERIC0;
      } else if (vtn_var->mode == vtn_variable_mode_input ||
                 vtn_var->mode == vtn_variable_mode_output) {
         is_vertex_input = false;
         location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
      } else {
         vtn_warn("Location must be on input or output variable");
         return;
      }

      if (vtn_var->var) {
         /* This handles the member and lone variable cases */
         vtn_var->var->data.location = location;
      } else {
         /* This handles the structure member case */
         vtn_assert(vtn_var->members);
         unsigned length =
            glsl_get_length(glsl_without_array(vtn_var->type->type));
         for (unsigned i = 0; i < length; i++) {
            vtn_var->members[i]->data.location = location;
            location +=
               glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
                                          is_vertex_input);
         }
      }
      return;
   } else {
      if (vtn_var->var) {
         vtn_assert(member <= 0);
         apply_var_decoration(b, vtn_var->var, dec);
      } else if (vtn_var->members) {
         if (member >= 0) {
            vtn_assert(vtn_var->members);
            apply_var_decoration(b, vtn_var->members[member], dec);
         } else {
            unsigned length =
               glsl_get_length(glsl_without_array(vtn_var->type->type));
            for (unsigned i = 0; i < length; i++)
               apply_var_decoration(b, vtn_var->members[i], dec);
         }
      } else {
         /* A few variables, those with external storage, have no actual
          * nir_variables associated with them.  Fortunately, all decorations
          * we care about for those variables are on the type only.
          */
         vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                    vtn_var->mode == vtn_variable_mode_ssbo ||
                    vtn_var->mode == vtn_variable_mode_push_constant);
      }
   }
}

static enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
                          SpvStorageClass class,
                          struct vtn_type *interface_type,
                          nir_variable_mode *nir_mode_out)
{
   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   switch (class) {
   case SpvStorageClassUniform:
      if (interface_type->block) {
         mode = vtn_variable_mode_ubo;
         nir_mode = 0;
      } else if (interface_type->buffer_block) {
         mode = vtn_variable_mode_ssbo;
         nir_mode = 0;
      } else {
         vtn_fail("Invalid uniform variable type");
      }
      break;
   case SpvStorageClassStorageBuffer:
      mode = vtn_variable_mode_ssbo;
      nir_mode = 0;
      break;
   case SpvStorageClassUniformConstant:
      if (glsl_type_is_image(interface_type->type)) {
         mode = vtn_variable_mode_image;
         nir_mode = nir_var_uniform;
      } else if (glsl_type_is_sampler(interface_type->type)) {
         mode = vtn_variable_mode_sampler;
         nir_mode = nir_var_uniform;
      } else {
         vtn_fail("Invalid uniform constant variable type");
      }
      break;
   case SpvStorageClassPushConstant:
      mode = vtn_variable_mode_push_constant;
      nir_mode = nir_var_uniform;
      break;
   case SpvStorageClassInput:
      mode = vtn_variable_mode_input;
      nir_mode = nir_var_shader_in;
      break;
   case SpvStorageClassOutput:
      mode = vtn_variable_mode_output;
      nir_mode = nir_var_shader_out;
      break;
   case SpvStorageClassPrivate:
      mode = vtn_variable_mode_global;
      nir_mode = nir_var_global;
      break;
   case SpvStorageClassFunction:
      mode = vtn_variable_mode_local;
      nir_mode = nir_var_local;
      break;
   case SpvStorageClassWorkgroup:
      mode = vtn_variable_mode_workgroup;
      nir_mode = nir_var_shared;
      break;
   case SpvStorageClassCrossWorkgroup:
   case SpvStorageClassGeneric:
   case SpvStorageClassAtomicCounter:
   default:
      vtn_fail("Unhandled variable storage class");
   }

   if (nir_mode_out)
      *nir_mode_out = nir_mode;

   return mode;
}

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
   /* This pointer needs to have a pointer type with actual storage */
   vtn_assert(ptr->ptr_type);
   vtn_assert(ptr->ptr_type->type);

   if (!ptr->offset) {
      /* If we don't have an offset then we must be a pointer to the variable
       * itself.
       */
      vtn_assert(!ptr->offset && !ptr->block_index);

      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }

   vtn_assert(ptr->offset);
   if (ptr->block_index) {
      vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
                 ptr->mode == vtn_variable_mode_ssbo);
      return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
   } else {
      vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
      return ptr->offset;
   }
}
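
/* The SSA form of a pointer is therefore either a vec2 of
 * (block_index, byte offset) for UBOs and SSBOs or a single 32-bit byte
 * offset for workgroup memory; vtn_pointer_from_ssa() below performs the
 * inverse unpacking.
 */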
);
1640 struct vtn_pointer
*
1641 vtn_pointer_from_ssa(struct vtn_builder
*b
, nir_ssa_def
*ssa
,
1642 struct vtn_type
*ptr_type
)
1644 vtn_assert(ssa
->num_components
<= 2 && ssa
->bit_size
== 32);
1645 vtn_assert(ptr_type
->base_type
== vtn_base_type_pointer
);
1646 vtn_assert(ptr_type
->deref
->base_type
!= vtn_base_type_pointer
);
1647 /* This pointer type needs to have actual storage */
1648 vtn_assert(ptr_type
->type
);
1650 struct vtn_pointer
*ptr
= rzalloc(b
, struct vtn_pointer
);
1651 ptr
->mode
= vtn_storage_class_to_mode(b
, ptr_type
->storage_class
,
1653 ptr
->type
= ptr_type
->deref
;
1654 ptr
->ptr_type
= ptr_type
;
1656 if (ssa
->num_components
> 1) {
1657 vtn_assert(ssa
->num_components
== 2);
1658 vtn_assert(ptr
->mode
== vtn_variable_mode_ubo
||
1659 ptr
->mode
== vtn_variable_mode_ssbo
);
1660 ptr
->block_index
= nir_channel(&b
->nb
, ssa
, 0);
1661 ptr
->offset
= nir_channel(&b
->nb
, ssa
, 1);
1663 vtn_assert(ssa
->num_components
== 1);
1664 vtn_assert(ptr
->mode
== vtn_variable_mode_workgroup
);
1665 ptr
->block_index
= NULL
;

static bool
is_per_vertex_inout(const struct vtn_variable *var, gl_shader_stage stage)
{
   if (var->patch || !glsl_type_is_array(var->type->type))
      return false;

   if (var->mode == vtn_variable_mode_input) {
      return stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL ||
             stage == MESA_SHADER_GEOMETRY;
   }

   if (var->mode == vtn_variable_mode_output)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}

static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
                    struct vtn_type *ptr_type, SpvStorageClass storage_class,
                    nir_constant *initializer)
{
   vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
   struct vtn_type *type = ptr_type->deref;

   struct vtn_type *without_array = type;
   while (glsl_type_is_array(without_array->type))
      without_array = without_array->array_element;

   enum vtn_variable_mode mode;
   nir_variable_mode nir_mode;
   mode = vtn_storage_class_to_mode(b, storage_class, without_array, &nir_mode);

   switch (mode) {
   case vtn_variable_mode_ubo:
      b->shader->info.num_ubos++;
      break;
   case vtn_variable_mode_ssbo:
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_image:
      b->shader->info.num_images++;
      break;
   case vtn_variable_mode_sampler:
      b->shader->info.num_textures++;
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      break;
   default:
      /* No tallying is needed */
      break;
   }

   struct vtn_variable *var = rzalloc(b, struct vtn_variable);
   var->type = type;
   var->mode = mode;

   vtn_assert(val->value_type == vtn_value_type_pointer);
   val->pointer = vtn_pointer_for_variable(b, var, ptr_type);

   switch (var->mode) {
   case vtn_variable_mode_local:
   case vtn_variable_mode_global:
   case vtn_variable_mode_image:
   case vtn_variable_mode_sampler:
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
      var->var->type = var->type->type;
      var->var->data.mode = nir_mode;

      switch (var->mode) {
      case vtn_variable_mode_image:
      case vtn_variable_mode_sampler:
         var->var->interface_type = without_array->type;
         break;
      default:
         var->var->interface_type = NULL;
         break;
      }
      break;

   case vtn_variable_mode_workgroup:
      if (b->options->lower_workgroup_access_to_offsets) {
         var->shared_location = -1;
      } else {
         /* Create the variable normally */
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->data.mode = nir_var_shared;
      }
      break;

   case vtn_variable_mode_input:
   case vtn_variable_mode_output: {
      /* In order to know whether or not we're a per-vertex inout, we need
       * the patch qualifier.  This means walking the variable decorations
       * early before we actually create any variables.  Not a big deal.
       *
       * GLSLang really likes to place decorations in the most interior
       * thing it possibly can.  In particular, if you have a struct, it
       * will place the patch decorations on the struct members.  This
       * should be handled by the variable splitting below just fine.
       *
       * If you have an array-of-struct, things get even more weird as it
       * will place the patch decorations on the struct even though it's
       * inside an array and some of the members being patch and others not
       * makes no sense whatsoever.  Since the only sensible thing is for
       * it to be all or nothing, we'll call it patch if any of the members
       * are declared patch.
       */
      var->patch = false;
      vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
      if (glsl_type_is_array(var->type->type) &&
          glsl_type_is_struct(without_array->type)) {
         vtn_foreach_decoration(b, without_array->val,
                                var_is_patch_cb, &var->patch);
      }

      /* For inputs and outputs, we immediately split structures.  This
       * is for a couple of reasons.  For one, builtins may all come in
       * a struct and we really want those split out into separate
       * variables.  For another, interpolation qualifiers can be
       * applied to members of the top-level struct and we need to be
       * able to preserve that information.
       */

      int array_length = -1;
      struct vtn_type *interface_type = var->type;
      if (is_per_vertex_inout(var, b->shader->info.stage)) {
         /* In Geometry shaders (and some tessellation), inputs come
          * in per-vertex arrays.  However, some builtins come in
          * non-per-vertex, hence the need for the is_array check.  In
          * any case, there are no non-builtin arrays allowed so this
          * check should be sufficient.
          */
         interface_type = var->type->array_element;
         array_length = glsl_get_length(var->type->type);
      }

      if (glsl_type_is_struct(interface_type->type)) {
         /* It's a struct.  Split it. */
         unsigned num_members = glsl_get_length(interface_type->type);
         var->members = ralloc_array(b, nir_variable *, num_members);

         for (unsigned i = 0; i < num_members; i++) {
            const struct glsl_type *mtype = interface_type->members[i]->type;
            if (array_length >= 0)
               mtype = glsl_array_type(mtype, array_length);

            var->members[i] = rzalloc(b->shader, nir_variable);
            var->members[i]->name =
               ralloc_asprintf(var->members[i], "%s.%d", val->name, i);
            var->members[i]->type = mtype;
            var->members[i]->interface_type =
               interface_type->members[i]->type;
            var->members[i]->data.mode = nir_mode;
            var->members[i]->data.patch = var->patch;
         }
      } else {
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
         var->var->type = var->type->type;
         var->var->interface_type = interface_type->type;
         var->var->data.mode = nir_mode;
         var->var->data.patch = var->patch;
      }

      /* For inputs and outputs, we need to grab locations and builtin
       * information from the interface type.
       */
      vtn_foreach_decoration(b, interface_type->val, var_decoration_cb, var);
      break;
   }

   case vtn_variable_mode_param:
      vtn_fail("Not created through OpVariable");

   case vtn_variable_mode_ubo:
   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_push_constant:
      /* These don't need actual variables. */
      break;
   }

   if (initializer) {
      var->var->constant_initializer =
         nir_constant_clone(initializer, var->var);
   }

   vtn_foreach_decoration(b, val, var_decoration_cb, var);

   if (var->mode == vtn_variable_mode_image ||
       var->mode == vtn_variable_mode_sampler) {
      /* XXX: We still need the binding information in the nir_variable
       * for these.  We should fix that.
       */
      var->var->data.binding = var->binding;
      var->var->data.descriptor_set = var->descriptor_set;
      var->var->data.index = var->input_attachment_index;

      if (var->mode == vtn_variable_mode_image)
         var->var->data.image.format = without_array->image_format;
   }

   if (var->mode == vtn_variable_mode_local) {
      vtn_assert(var->members == NULL && var->var != NULL);
      nir_function_impl_add_variable(b->nb.impl, var->var);
   } else if (var->var) {
      nir_shader_add_variable(b->shader, var->var);
   } else if (var->members) {
      unsigned count = glsl_get_length(without_array->type);
      for (unsigned i = 0; i < count; i++) {
         vtn_assert(var->members[i]->data.mode != nir_var_local);
         nir_shader_add_variable(b->shader, var->members[i]);
      }
   } else {
      vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
   }
}

void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpVariable: {
      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);

      SpvStorageClass storage_class = w[3];
      nir_constant *initializer = NULL;
      if (count > 4)
         initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;

      vtn_create_variable(b, val, ptr_type, storage_class, initializer);
      break;
   }

   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);

      unsigned idx = 0;
      for (int i = 4; i < count; i++) {
         struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
         if (link_val->value_type == vtn_value_type_constant) {
            chain->link[idx].mode = vtn_access_mode_literal;
            chain->link[idx].id = link_val->constant->values[0].u32[0];
         } else {
            chain->link[idx].mode = vtn_access_mode_id;
            chain->link[idx].id = w[i];
         }
         idx++;
      }

      struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
      if (base_val->value_type == vtn_value_type_sampled_image) {
         /* This is rather insane.  SPIR-V allows you to use OpSampledImage
          * to combine an array of images with a single sampler to get an
          * array of sampled images that all share the same sampler.
          * Fortunately, this means that we can more-or-less ignore the
          * sampler when crawling the access chain, but it does leave us
          * with this rather awkward little special-case.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
         val->sampled_image = ralloc(b, struct vtn_sampled_image);
         val->sampled_image->type = base_val->sampled_image->type;
         val->sampled_image->image =
            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
         val->sampled_image->sampler = base_val->sampled_image->sampler;
      } else {
         vtn_assert(base_val->value_type == vtn_value_type_pointer);
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);
         val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
         val->pointer->ptr_type = ptr_type;
      }
      break;
   }

   case SpvOpCopyMemory: {
      struct vtn_value *dest = vtn_value(b, w[1], vtn_value_type_pointer);
      struct vtn_value *src = vtn_value(b, w[2], vtn_value_type_pointer);

      vtn_variable_copy(b, dest->pointer, src->pointer);
      break;
   }

   case SpvOpLoad: {
      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_pointer *src =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      if (src->mode == vtn_variable_mode_image ||
          src->mode == vtn_variable_mode_sampler) {
         vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
         return;
      }

      vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
      break;
   }

   case SpvOpStore: {
      struct vtn_pointer *dest =
         vtn_value(b, w[1], vtn_value_type_pointer)->pointer;

      if (glsl_type_is_sampler(dest->type->type)) {
         vtn_warn("OpStore of a sampler detected.  Doing on-the-fly copy "
                  "propagation to work around the problem.");
         vtn_assert(dest->var->copy_prop_sampler == NULL);
         dest->var->copy_prop_sampler =
            vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
         break;
      }

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);
      break;
   }

   case SpvOpArrayLength: {
      struct vtn_pointer *ptr =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;

      const uint32_t offset = ptr->var->type->offsets[w[4]];
      const uint32_t stride = ptr->var->type->members[w[4]]->stride;

      if (!ptr->block_index) {
         struct vtn_access_chain chain = {
            .length = 0,
         };
         ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
         vtn_assert(ptr->block_index);
      }

      nir_intrinsic_instr *instr =
         nir_intrinsic_instr_create(b->nb.shader,
                                    nir_intrinsic_get_buffer_size);
      instr->src[0] = nir_src_for_ssa(ptr->block_index);
      nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
      nir_builder_instr_insert(&b->nb, &instr->instr);
      nir_ssa_def *buf_size = &instr->dest.ssa;

      /* array_length = max(buffer_size - offset, 0) / stride */
      nir_ssa_def *array_length =
         nir_idiv(&b->nb,
                  nir_imax(&b->nb,
                           nir_isub(&b->nb,
                                    buf_size,
                                    nir_imm_int(&b->nb, offset)),
                           nir_imm_int(&b->nb, 0u)),
                  nir_imm_int(&b->nb, stride));

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
      val->ssa->def = array_length;
      break;
   }

   case SpvOpCopyMemorySized:
   default:
      vtn_fail("Unhandled opcode");
   }
}
*dest
=
1994 vtn_value(b
, w
[1], vtn_value_type_pointer
)->pointer
;
1996 if (glsl_type_is_sampler(dest
->type
->type
)) {
1997 vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
1998 "propagation to workaround the problem.");
1999 vtn_assert(dest
->var
->copy_prop_sampler
== NULL
);
2000 dest
->var
->copy_prop_sampler
=
2001 vtn_value(b
, w
[2], vtn_value_type_pointer
)->pointer
;
2005 struct vtn_ssa_value
*src
= vtn_ssa_value(b
, w
[2]);
2006 vtn_variable_store(b
, src
, dest
);
2010 case SpvOpArrayLength
: {
2011 struct vtn_pointer
*ptr
=
2012 vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2014 const uint32_t offset
= ptr
->var
->type
->offsets
[w
[4]];
2015 const uint32_t stride
= ptr
->var
->type
->members
[w
[4]]->stride
;
2017 if (!ptr
->block_index
) {
2018 struct vtn_access_chain chain
= {
2021 ptr
= vtn_ssa_offset_pointer_dereference(b
, ptr
, &chain
);
2022 vtn_assert(ptr
->block_index
);
2025 nir_intrinsic_instr
*instr
=
2026 nir_intrinsic_instr_create(b
->nb
.shader
,
2027 nir_intrinsic_get_buffer_size
);
2028 instr
->src
[0] = nir_src_for_ssa(ptr
->block_index
);
2029 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 1, 32, NULL
);
2030 nir_builder_instr_insert(&b
->nb
, &instr
->instr
);
2031 nir_ssa_def
*buf_size
= &instr
->dest
.ssa
;
2033 /* array_length = max(buffer_size - offset, 0) / stride */
2034 nir_ssa_def
*array_length
=
2039 nir_imm_int(&b
->nb
, offset
)),
2040 nir_imm_int(&b
->nb
, 0u)),
2041 nir_imm_int(&b
->nb
, stride
));
2043 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
2044 val
->ssa
= vtn_create_ssa_value(b
, glsl_uint_type());
2045 val
->ssa
->def
= array_length
;
2049 case SpvOpCopyMemorySized
:
2051 vtn_fail("Unhandled opcode");