/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>

#if UTIL_ARCH_BIG_ENDIAN
#include <byteswap.h>
#endif
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static unsigned idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
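/* Note that _vtn_fail() does not return: the longjmp() above unwinds to the
 * setjmp() installed on b->fail_jump before parsing starts, aborting the
 * whole translation.  Callers normally reach this through the vtn_fail(),
 * vtn_fail_if(), and vtn_fail_with_opcode() convenience macros.
 */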
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
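/* Illustrative example of the recursion above: for a SPIR-V struct
 * { vec4 a; float b[2]; }, the returned vtn_ssa_value has two elems; the
 * first wraps a single vec4 undef def and the second is itself a
 * two-element tree of scalar float undefs, mirroring the glsl_type.
 */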
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}
nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}
struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}
static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}
static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref, bool propagate_non_uniform)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
   value->propagated_non_uniform = propagate_non_uniform;
}
static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}
static nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}
static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si, bool propagate_non_uniform)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   struct vtn_value *value = vtn_push_nir_ssa(b, value_id,
                                              vtn_sampled_image_to_nir_ssa(b, si));
   value->propagated_non_uniform = propagate_non_uniform;
}
static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);
   return si;
}
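/* The vec2 unpacked above is the same payload that
 * vtn_sampled_image_to_nir_ssa() builds: component 0 carries the image
 * deref and component 1 the sampler deref, each cast back to a deref of
 * the appropriate glsl type.
 */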
const char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   /* From the SPIR-V spec:
    *
    *    "A string is interpreted as a nul-terminated stream of characters.
    *    The character set is Unicode in the UTF-8 encoding scheme. The UTF-8
    *    octets (8-bit bytes) are packed four per word, following the
    *    little-endian convention (i.e., the first octet is in the
    *    lowest-order 8 bits of the word). The final word contains the
    *    string’s nul-termination character (0), and all contents past the
    *    end of the string in the final word are padded with 0."
    *
    * On big-endian, we need to byte-swap.
    */
#if UTIL_ARCH_BIG_ENDIAN
   {
      uint32_t *copy = ralloc_array(b, uint32_t, word_count);
      for (unsigned i = 0; i < word_count; i++)
         copy[i] = bswap_32(words[i]);
      words = copy;
   }
#endif

   const char *str = (char *)words;
   const char *end = memchr(str, 0, word_count * 4);
   vtn_fail_if(end == NULL, "String is not null-terminated");

   if (words_used)
      *words_used = DIV_ROUND_UP(end - str + 1, sizeof(*words));

   return str;
}
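/* Worked example of the packing described above: the string "abc" fits in a
 * single word, words[0] == 0x00636261 ('a' == 0x61 in the lowest-order byte,
 * then 'b', 'c', and the nul terminator), so *words_used == 1.  "abcd" fills
 * the first word completely and needs a second, all-zero word for its
 * terminator, giving *words_used == 2.
 */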
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return end;
}
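/* Layout reminder for the decoding above: every SPIR-V instruction packs its
 * total word count into the high 16 bits of the first word and the opcode
 * into the low 16 bits.  For example, "OpTypeFloat %1 32" occupies 3 words,
 * so w[0] == (3 << SpvWordCountShift) | SpvOpTypeFloat == 0x00030016 and the
 * loop advances w by 3.
 */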
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      const char *ext = vtn_string_literal(b, &w[2], count - 2, NULL);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
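/* Illustrative example of the decoration lists built above: for
 *
 *    OpMemberDecorate %struct 2 Offset 16
 *
 * the new node gets dec->scope == VTN_DEC_STRUCT_MEMBER0 + 2,
 * dec->decoration == SpvDecorationOffset, and dec->operands[0] == 16.
 * Nodes are prepended, so val->decoration lists decorations in reverse
 * order of their appearance in the binary.
 */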
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
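/* For example, two separate "OpTypeInt 32 0" declarations with different
 * result ids compare as compatible here because both map to the same uint
 * glsl_type, even though strict matching by type id would reject them.
 */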
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}
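/* For example, wrapping atomic_uint in the shape of uint[4][2] yields
 * atomic_uint[4][2]: the recursion above rebuilds each array level around
 * the new element type while preserving its length and explicit stride.
 */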
static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations kept because we need offsets for XFB arrays of
       * blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators to perform type deduplication.  Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
   case SpvDecorationCPacked:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
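/* Worked example (illustrative): for a column-major matrix member with
 * MatrixStride 16, mat_type->stride becomes 16, i.e. each column vector
 * starts on a 16-byte boundary even if it only occupies 12 bytes.  In the
 * row-major branch the literal instead becomes the stride of the vector
 * array_element, and the matrix stride is inherited from it.
 */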
static void
struct_packed_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   vtn_assert(val->type->base_type == vtn_base_type_struct);
   if (dec->decoration == SpvDecorationCPacked) {
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      val->type->packed = true;
   }
}
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable; just check that if it is not
       * a struct member, it is applied to a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      /* Handled when parsing a struct type, nothing to do here. */
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      vtn_foreach_decoration(b, val, struct_packed_decoration_cb, NULL);

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            if (!val->type->packed)
               offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct",
                                            val->type->packed);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
                     "Sampled type of OpTypeImage must be void for kernels");
      } else {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                     glsl_get_bit_size(sampled_type->type) != 32,
                     "Sampled type of OpTypeImage must be a 32-bit scalar");
      }

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else if (b->shader->info.stage == MESA_SHADER_KERNEL)
         /* Per the CL C spec: If no qualifier is provided, read_only is assumed. */
         val->type->access_qualifier = SpvAccessQualifierReadOnly;
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 GLSL_TYPE_VOID);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented in NIR as a vec2 SSA value where each
       * component is the result of a deref instruction.  The first component
       * is the image and the second is the sampler.  An OpLoad on an
       * OpTypeSampledImage pointer from UniformConstant memory just takes
       * the NIR deref from the pointer and duplicates it to both vector
       * components.
       */
      nir_address_format addr_format =
         vtn_mode_to_address_format(b, vtn_variable_mode_function);
      assert(nir_address_format_num_components(addr_format) == 1);
      unsigned bit_size = nir_address_format_bit_size(addr_format);
      assert(bit_size == 32 || bit_size == 64);

      enum glsl_base_type base_type =
         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
      val->type->type = glsl_vector_type(base_type, 2);
      break;
   }

   case SpvOpTypeSampler:
      val->type->base_type = vtn_base_type_sampler;

      /* Samplers are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeSampler pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         break;
      }
   }
}
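/* A driver-supplied specialization such as { .id = 7, .value = ... }
 * overrides any spec constant decorated SpecId 7 here; when no entry
 * matches, *value keeps the default encoded in the SPIR-V module.
 */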
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}
1824 vtn_handle_constant(struct vtn_builder
*b
, SpvOp opcode
,
1825 const uint32_t *w
, unsigned count
)
1827 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_constant
);
1828 val
->constant
= rzalloc(b
, nir_constant
);
1830 case SpvOpConstantTrue
:
1831 case SpvOpConstantFalse
:
1832 case SpvOpSpecConstantTrue
:
1833 case SpvOpSpecConstantFalse
: {
1834 vtn_fail_if(val
->type
->type
!= glsl_bool_type(),
1835 "Result type of %s must be OpTypeBool",
1836 spirv_op_to_string(opcode
));
1838 bool bval
= (opcode
== SpvOpConstantTrue
||
1839 opcode
== SpvOpSpecConstantTrue
);
1841 nir_const_value u32val
= nir_const_value_for_uint(bval
, 32);
1843 if (opcode
== SpvOpSpecConstantTrue
||
1844 opcode
== SpvOpSpecConstantFalse
)
1845 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
, &u32val
);
1847 val
->constant
->values
[0].b
= u32val
.u32
!= 0;
1852 case SpvOpSpecConstant
: {
1853 vtn_fail_if(val
->type
->base_type
!= vtn_base_type_scalar
,
1854 "Result type of %s must be a scalar",
1855 spirv_op_to_string(opcode
));
1856 int bit_size
= glsl_get_bit_size(val
->type
->type
);
1859 val
->constant
->values
[0].u64
= vtn_u64_literal(&w
[3]);
1862 val
->constant
->values
[0].u32
= w
[3];
1865 val
->constant
->values
[0].u16
= w
[3];
1868 val
->constant
->values
[0].u8
= w
[3];
1871 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size
);
1874 if (opcode
== SpvOpSpecConstant
)
1875 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
,
1876 &val
->constant
->values
[0]);
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }
   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }
      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }
      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;
            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }
   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
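
/* For illustration (hypothetical module and values, not from this file):
 * given
 *
 *    OpDecorate %x SpecId 7
 *    %x = OpSpecConstant %uint 10
 *
 * and a consumer that supplies specializations[] = { { .id = 7,
 * .value.u32 = 64 } }, spec_constant_decoration_cb() replaces the default
 * literal 10 with 64 before the nir_constant above is filled in.  Without
 * a matching entry, the default encoded in the SPIR-V binary is kept.
 */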
static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated them until the final backend stage, but it
    * still results in correct execution.
    *
    * A further improvement would be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */
   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics |
                    SpvMemorySemanticsVolatileMask);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with a matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with a matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
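
/* Worked example (hypothetical instruction, for illustration only): an
 * OpAtomicIAdd carrying AcquireRelease | WorkgroupMemory is split by the
 * helper above into
 *
 *    *before = Release | WorkgroupMemory   (writes cannot sink below it)
 *    *after  = Acquire | WorkgroupMemory   (reads cannot hoist above it)
 *
 * i.e. a release barrier is emitted before the operation and an acquire
 * barrier after it.
 */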
static nir_memory_semantics
vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
                                       SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   return nir_semantics;
}
static nir_variable_mode
vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
                                  SpvMemorySemanticsMask semantics)
{
   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   return modes;
}
static nir_scope
vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
{
   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   return nir_scope;
}
static void
vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
                                SpvScope mem_scope,
                                SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);

   /* Memory semantics is optional for OpControlBarrier. */
   nir_scope nir_mem_scope;
   if (nir_semantics == 0 || modes == 0)
      nir_mem_scope = NIR_SCOPE_NONE;
   else
      nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);

   nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
}
static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
}
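
/* For illustration (hypothetical operands): an OpMemoryBarrier with scope
 * Workgroup and semantics AcquireRelease | WorkgroupMemory ends up here as
 *
 *    nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, NIR_SCOPE_WORKGROUP,
 *                       NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,
 *                       nir_var_mem_shared);
 *
 * while semantics lacking either ordering bits or storage-class bits are
 * dropped entirely by the early return above.
 */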
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   /* Always use bare types for SSA values for a couple of reasons:
    *
    *  1. Code which emits deref chains should never listen to the explicit
    *     layout information on the SSA value if any exists.  If we've
    *     accidentally been relying on this, we want to find those bugs.
    *
    *  2. We want to be able to quickly check that an SSA value being assigned
    *     to a SPIR-V value has the right type.  Using bare types everywhere
    *     ensures that we can pointer-compare.
    */
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
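
/* Shape example (for illustration): for a glsl mat2, the value returned
 * above carries no direct def; instead val->elems[0] and val->elems[1] are
 * two vec2 vtn_ssa_values built by the array-or-matrix branch of the
 * recursion.  Only vector/scalar leaves hold an actual NIR def.
 */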
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
   src.src_type = type;
   return src;
}
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
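
/* Worked example (hypothetical word stream): if w[mask_idx] is
 * Grad | ConstOffset, then for op = ConstOffset the bit counting above
 * yields 1 (one lower-bit operand, Grad) + 1 (skip the mask word itself)
 * + 1 (Grad takes two arguments), so the ConstOffset argument lives at
 * w[mask_idx + 3], right after Grad's ddx/ddy pair.
 */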
static void
non_uniform_decoration_cb(struct vtn_builder *b,
                          struct vtn_value *val, int member,
                          const struct vtn_decoration *dec, void *void_ctx)
{
   enum gl_access_qualifier *access = void_ctx;
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      *access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_type *ret_type = vtn_get_type(b, w[1]);

   if (opcode == SpvOpSampledImage) {
      struct vtn_sampled_image si = {
         .image = vtn_get_image(b, w[3]),
         .sampler = vtn_get_sampler(b, w[4]),
      };

      enum gl_access_qualifier access = 0;
      vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
                             non_uniform_decoration_cb, &access);
      vtn_foreach_decoration(b, vtn_untyped_value(b, w[4]),
                             non_uniform_decoration_cb, &access);

      vtn_push_sampled_image(b, w[2], si, access & ACCESS_NON_UNIFORM);
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);

      enum gl_access_qualifier access = 0;
      vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
                             non_uniform_decoration_cb, &access);

      vtn_push_image(b, w[2], si.image, access & ACCESS_NON_UNIFORM);
      return;
   }

   nir_deref_instr *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
      image = si.image;
      sampler = si.sampler;
   } else {
      image = vtn_get_image(b, w[3]);
   }

   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
   const bool is_array = glsl_sampler_type_is_array(image->type);
   nir_alu_type dest_type = nir_type_invalid;
   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      dest_type = nir_type_uint;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;

   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't use a sampler */
      break;

   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;

   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }
   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      struct vtn_ssa_value *coord_val = vtn_ssa_value(b, w[idx++]);
      coord = coord_val->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));

      /* OpenCL allows integer sampling coordinates */
      if (glsl_type_is_integer(coord_val->type) &&
          opcode == SpvOpImageSampleExplicitLod) {
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "Unless the Kernel capability is being used, the coordinate "
                     "parameter of OpImageSampleExplicitLod must be floating point.");

         p->src = nir_src_for_ssa(nir_i2f32(&b->nb, p->src.ssa));
      }

      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }
   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }
   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);

   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;
   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);

   if (sampled_val->propagated_non_uniform)
      access |= ACCESS_NON_UNIFORM;

   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;
   /* for non-query ops, get dest_type from SPIR-V return type */
   if (dest_type == nir_type_invalid) {
      /* the return type should match the image type, unless the image type is
       * VOID (CL image), in which case the return type dictates the sampler type
       */
      enum glsl_base_type sampler_base =
         glsl_get_sampler_result_type(image->type);
      enum glsl_base_type ret_base = glsl_get_base_type(ret_type->type);
      vtn_fail_if(sampler_base != ret_base && sampler_base != GLSL_TYPE_VOID,
                  "SPIR-V return type mismatches image type. This is only valid "
                  "for untyped images (OpenCL).");

      switch (ret_base) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));
   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }
   nir_builder_instr_insert(&b->nb, &instr->instr);

   vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   const struct glsl_type *type = vtn_get_type(b, w[1])->type;
   unsigned bit_size = glsl_get_bit_size(type);

   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, 1, bit_size));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, -1, bit_size));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
      src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
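
/* Mapping note (illustrative): OpAtomicIIncrement/IDecrement carry no value
 * operand in SPIR-V, so the helper above synthesizes +1/-1 immediates and
 * both map onto a single NIR atomic_add intrinsic; OpAtomicISub likewise
 * becomes an atomic_add of the negated source.
 */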
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, coord->num_components - 1);

   return nir_swizzle(&b->nb, coord, swizzle, 4);
}
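
/* Example (for illustration): a 3-component 2D-array coordinate
 * (x, y, layer) is padded above to (x, y, layer, layer), since each swizzle
 * index is clamped to the last real component.
 */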
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
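
/* Example (for illustration): a vec2 (x, y) expands above to (x, y, x, x).
 * Unlike get_image_coord() the padding reuses component 0 rather than the
 * last component; the contents of the padding lanes don't matter to the
 * consumers.
 */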
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_nir_deref(b, w[3]);
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_get_nir_ssa(b, w[5]);
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }
;
3003 SpvScope scope
= SpvScopeInvocation
;
3004 SpvMemorySemanticsMask semantics
= 0;
3006 enum gl_access_qualifier access
= 0;
3008 struct vtn_value
*res_val
;
3010 case SpvOpAtomicExchange
:
3011 case SpvOpAtomicCompareExchange
:
3012 case SpvOpAtomicCompareExchangeWeak
:
3013 case SpvOpAtomicIIncrement
:
3014 case SpvOpAtomicIDecrement
:
3015 case SpvOpAtomicIAdd
:
3016 case SpvOpAtomicISub
:
3017 case SpvOpAtomicLoad
:
3018 case SpvOpAtomicSMin
:
3019 case SpvOpAtomicUMin
:
3020 case SpvOpAtomicSMax
:
3021 case SpvOpAtomicUMax
:
3022 case SpvOpAtomicAnd
:
3024 case SpvOpAtomicXor
:
3025 case SpvOpAtomicFAddEXT
:
3026 res_val
= vtn_value(b
, w
[3], vtn_value_type_image_pointer
);
3027 image
= *res_val
->image
;
3028 scope
= vtn_constant_uint(b
, w
[4]);
3029 semantics
= vtn_constant_uint(b
, w
[5]);
3030 access
|= ACCESS_COHERENT
;
3033 case SpvOpAtomicStore
:
3034 res_val
= vtn_value(b
, w
[1], vtn_value_type_image_pointer
);
3035 image
= *res_val
->image
;
3036 scope
= vtn_constant_uint(b
, w
[2]);
3037 semantics
= vtn_constant_uint(b
, w
[3]);
3038 access
|= ACCESS_COHERENT
;
3041 case SpvOpImageQuerySizeLod
:
3042 res_val
= vtn_untyped_value(b
, w
[3]);
3043 image
.image
= vtn_get_image(b
, w
[3]);
3045 image
.sample
= NULL
;
3046 image
.lod
= vtn_ssa_value(b
, w
[4])->def
;
3049 case SpvOpImageQuerySize
:
3050 res_val
= vtn_untyped_value(b
, w
[3]);
3051 image
.image
= vtn_get_image(b
, w
[3]);
3053 image
.sample
= NULL
;
3057 case SpvOpImageQueryFormat
:
3058 case SpvOpImageQueryOrder
:
3059 res_val
= vtn_untyped_value(b
, w
[3]);
3060 image
.image
= vtn_get_image(b
, w
[3]);
3062 image
.sample
= NULL
;
3066 case SpvOpImageRead
: {
3067 res_val
= vtn_untyped_value(b
, w
[3]);
3068 image
.image
= vtn_get_image(b
, w
[3]);
3069 image
.coord
= get_image_coord(b
, w
[4]);
3071 const SpvImageOperandsMask operands
=
3072 count
> 5 ? w
[5] : SpvImageOperandsMaskNone
;
3074 if (operands
& SpvImageOperandsSampleMask
) {
3075 uint32_t arg
= image_operand_arg(b
, w
, count
, 5,
3076 SpvImageOperandsSampleMask
);
3077 image
.sample
= vtn_get_nir_ssa(b
, w
[arg
]);
3079 image
.sample
= nir_ssa_undef(&b
->nb
, 1, 32);
3082 if (operands
& SpvImageOperandsMakeTexelVisibleMask
) {
3083 vtn_fail_if((operands
& SpvImageOperandsNonPrivateTexelMask
) == 0,
3084 "MakeTexelVisible requires NonPrivateTexel to also be set.");
3085 uint32_t arg
= image_operand_arg(b
, w
, count
, 5,
3086 SpvImageOperandsMakeTexelVisibleMask
);
3087 semantics
= SpvMemorySemanticsMakeVisibleMask
;
3088 scope
= vtn_constant_uint(b
, w
[arg
]);
3091 if (operands
& SpvImageOperandsLodMask
) {
3092 uint32_t arg
= image_operand_arg(b
, w
, count
, 5,
3093 SpvImageOperandsLodMask
);
3094 image
.lod
= vtn_get_nir_ssa(b
, w
[arg
]);
3096 image
.lod
= nir_imm_int(&b
->nb
, 0);
3099 if (operands
& SpvImageOperandsVolatileTexelMask
)
3100 access
|= ACCESS_VOLATILE
;
3105 case SpvOpImageWrite
: {
3106 res_val
= vtn_untyped_value(b
, w
[1]);
3107 image
.image
= vtn_get_image(b
, w
[1]);
3108 image
.coord
= get_image_coord(b
, w
[2]);
3112 const SpvImageOperandsMask operands
=
3113 count
> 4 ? w
[4] : SpvImageOperandsMaskNone
;
3115 if (operands
& SpvImageOperandsSampleMask
) {
3116 uint32_t arg
= image_operand_arg(b
, w
, count
, 4,
3117 SpvImageOperandsSampleMask
);
3118 image
.sample
= vtn_get_nir_ssa(b
, w
[arg
]);
3120 image
.sample
= nir_ssa_undef(&b
->nb
, 1, 32);
3123 if (operands
& SpvImageOperandsMakeTexelAvailableMask
) {
3124 vtn_fail_if((operands
& SpvImageOperandsNonPrivateTexelMask
) == 0,
3125 "MakeTexelAvailable requires NonPrivateTexel to also be set.");
3126 uint32_t arg
= image_operand_arg(b
, w
, count
, 4,
3127 SpvImageOperandsMakeTexelAvailableMask
);
3128 semantics
= SpvMemorySemanticsMakeAvailableMask
;
3129 scope
= vtn_constant_uint(b
, w
[arg
]);
3132 if (operands
& SpvImageOperandsLodMask
) {
3133 uint32_t arg
= image_operand_arg(b
, w
, count
, 4,
3134 SpvImageOperandsLodMask
);
3135 image
.lod
= vtn_get_nir_ssa(b
, w
[arg
]);
3137 image
.lod
= nir_imm_int(&b
->nb
, 0);
3140 if (operands
& SpvImageOperandsVolatileTexelMask
)
3141 access
|= ACCESS_VOLATILE
;
3147 vtn_fail_with_opcode("Invalid image opcode", opcode
);
   if (semantics & SpvMemorySemanticsVolatileMask)
      access |= ACCESS_VOLATILE;

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageQuerySizeLod,         size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
   OP(ImageQueryFormat,          format)
   OP(ImageQueryOrder,           order)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
*intrin
= nir_intrinsic_instr_create(b
->shader
, op
);
3186 intrin
->src
[0] = nir_src_for_ssa(&image
.image
->dest
.ssa
);
3189 case SpvOpImageQuerySize
:
3190 case SpvOpImageQuerySizeLod
:
3191 case SpvOpImageQueryFormat
:
3192 case SpvOpImageQueryOrder
:
3195 /* The image coordinate is always 4 components but we may not have that
3196 * many. Swizzle to compensate.
3198 intrin
->src
[1] = nir_src_for_ssa(expand_to_vec4(&b
->nb
, image
.coord
));
3199 intrin
->src
[2] = nir_src_for_ssa(image
.sample
);
   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
   nir_intrinsic_set_access(intrin, access);
   switch (opcode) {
   case SpvOpImageQueryFormat:
   case SpvOpImageQueryOrder:
      /* No additional sources */
      break;
   case SpvOpImageQuerySize:
      intrin->src[1] = nir_src_for_ssa(nir_imm_int(&b->nb, 0));
      break;
   case SpvOpImageQuerySizeLod:
      intrin->src[1] = nir_src_for_ssa(image.lod);
      break;
   case SpvOpAtomicLoad:
   case SpvOpImageRead:
      /* Only OpImageRead can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * atomics too.
       */
      intrin->src[3] = nir_src_for_ssa(image.lod);
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      struct vtn_ssa_value *value = vtn_ssa_value(b, value_id);
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value->def));
      /* Only OpImageWrite can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * atomics too.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);

      if (opcode == SpvOpImageWrite)
         nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      unsigned dest_components = glsl_get_vector_elements(type->type);
      if (nir_intrinsic_infos[op].dest_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        nir_intrinsic_dest_components(intrin), 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (nir_intrinsic_dest_components(intrin) != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      vtn_push_nir_ssa(b, w[2], result);

      if (opcode == SpvOpImageRead)
         nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:      return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and AtomicSMax.
       * Right now there are no NIR intrinsics for them.  At the moment,
       * Atomic Counter support is only needed for ARB_spirv, so we only
       * need to support GLSL Atomic Counters, which are uints and don't
       * allow direct storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}
static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:      return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}
/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;
   enum gl_access_qualifier access = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }

   if (semantics & SpvMemorySemanticsVolatileMask)
      access |= ACCESS_VOLATILE;
   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_atomic_counter) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset.  In this case we don't need
       * to, as that info is already stored on the ptr->var->var nir_variable
       * (see vtn_create_variable).
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here, as
          * atomic counter uniforms don't have sources.
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      nir_intrinsic_set_access(atomic, access | ACCESS_COHERENT);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      if (ptr->mode != vtn_variable_mode_workgroup)
         access |= ACCESS_COHERENT;

      nir_intrinsic_set_access(atomic, access);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }
   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_mode_to_memory_semantics(ptr->mode);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
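
/* Example (for illustration): transposing a mat2x3 src (two vec3 columns)
 * produces three vec2 columns; column i of dest gathers component i of each
 * src column, which is what the per-source swizzle[0] = i above selects.
 */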
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
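
/* Index semantics (illustrative values): with src0 = vec2 (a, b) and
 * src1 = vec2 (c, d), indices {3, 0, 0xffffffff} select (d, a, undef):
 * indices below src0->num_components read src0, higher ones are rebased
 * into src1, and the 0xffffffff sentinel produces an undef lane.
 */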
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
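
/* Example (for illustration): constructing a vec4 from (vec2 xy, float z,
 * float w) walks the loop above with dest_idx 0..3; the first two vec ALU
 * sources swizzle components 0 and 1 out of xy, and z and w fill the rest.
 */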
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      /* If we got a vector here, that means the next index will be trying to
       * dereference a scalar.
       */
      vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
                  "OpCompositeInsert has too many indices.");
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");

      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def,
                                       indices[i]);
   } else {
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
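/* Illustrative example: inserting a float into a mat3 with indices {2, 1}
 * walks into elems[2] (the third column, a vec3) and then, because that
 * element is a vector, performs the final component-granularity insert
 * with nir_vector_insert_imm at component 1.
 */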
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");

         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the component to extract from the vector.
          */
         const struct glsl_type *scalar_type =
            glsl_scalar_type(glsl_get_base_type(cur->type));
         struct vtn_ssa_value *ret = vtn_create_ssa_value(b, scalar_type);
         ret->def = nir_channel(&b->nb, cur->def, indices[i]);
         return ret;
      } else {
         vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
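/* Illustrative example: extracting from struct { vec4 v; } with indices
 * {0, 3} first steps into elems[0] (the vec4) and then, on the final
 * iteration, returns the scalar v.w via nir_channel.
 */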
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_get_type(b, w[1]);
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]));
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                   vtn_get_nir_ssa(b, w[4]),
                                   vtn_get_nir_ssa(b, w[5]));
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]),
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      assume(elems >= 1);
      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   case SpvOpCopyObject:
      vtn_copy_value(b, w[3], w[2]);
      return;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa_value(b, w[2], ssa);
}
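/* For reference: in the SPIR-V encoding handled above, w[1] is the result
 * type <id>, w[2] the result <id>, and w[3] the first operand.  That is why
 * OpCompositeExtract takes its literal indices starting at w[4] (hence
 * "w + 4, count - 4"), while OpCompositeInsert, whose Object operand comes
 * before the Composite operand, takes them starting at w[5].
 */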
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->shader->options->use_scoped_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask |
      SpvMemorySemanticsOutputMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   /* Map the GLSL memoryBarrier() construct, and any barrier with more than
    * one semantic, to the corresponding NIR one.
    */
   if (util_bitcount(semantics & all_memory_semantics) > 1) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      if (semantics & SpvMemorySemanticsOutputMemoryMask) {
         /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't
          * include TCS outputs, so we have to emit a separate intrinsic for
          * those.  We then need to emit another memory_barrier to prevent
          * moving non-output operations to before the tcs_patch barrier.
          */
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      }
      return;
   }

   /* Issue a more specific barrier */
   switch (semantics & all_memory_semantics) {
   case SpvMemorySemanticsUniformMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
      break;
   case SpvMemorySemanticsWorkgroupMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
      break;
   case SpvMemorySemanticsAtomicCounterMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
      break;
   case SpvMemorySemanticsImageMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
      break;
   case SpvMemorySemanticsOutputMemoryMask:
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
      break;
   default:
      break;
   }
}
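/* Illustrative mapping: a barrier whose semantics name only WorkgroupMemory
 * becomes nir_intrinsic_memory_barrier_shared, while one that names, say,
 * UniformMemory | ImageMemory sets more than one bit and falls into the
 * generic nir_intrinsic_memory_barrier path above.
 */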
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }
      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier
       * with memory semantics of None for GLSL barrier().
       * And before that, prior to c3f1cdfa, emitted the OpControlBarrier
       * with Device instead of Workgroup for execution scope.
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          (execution_scope == SpvScopeWorkgroup ||
           execution_scope == SpvScopeDevice) &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         execution_scope = SpvScopeWorkgroup;
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class: Writes to
       *    Output variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      if (b->shader->options->use_scoped_barrier) {
         vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
                                         memory_semantics);
      } else {
         vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

         if (execution_scope == SpvScopeWorkgroup)
            vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      }
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
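/* Illustrative example of the workaround above: GLSL barrier() in a compute
 * shader compiled with an old glslang arrives as
 * "OpControlBarrier Workgroup Workgroup None"; the workaround rewrites the
 * semantics to AcquireRelease | WorkgroupMemory, which is what a correct
 * front-end would have emitted.
 */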
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}
#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)
static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}
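/* For reference, the OpEntryPoint word layout consumed above is
 * w[1] = Execution Model, w[2] = <id> of the entry point's OpFunction,
 * w[3]... = the literal name string, followed by the interface <id>s,
 * which is why the name is parsed starting at &w[3].
 */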
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s",
               lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;
      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;
      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
         spv_check_supported(kernel_image, cap);
         break;

      case SpvCapabilityLiteralSampler:
         spv_check_supported(literal_sampler, cap);
         break;

      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      case SpvCapabilityImageGatherBiasLodAMD:
         spv_check_supported(amd_image_gather_bias_lod, cap);
         break;

      case SpvCapabilityAtomicFloat32AddEXT:
         spv_check_supported(float32_atomic_add, cap);
         break;

      case SpvCapabilityAtomicFloat64AddEXT:
         spv_check_supported(float64_atomic_add, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         assert(nir_address_format_bit_size(b->options->global_addr_format) == 32);
         assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->shared_addr_format) == 32);
         assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->constant_addr_format) == 32);
         assert(nir_address_format_num_components(b->options->constant_addr_format) == 1);
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         assert(nir_address_format_bit_size(b->options->global_addr_format) == 64);
         assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->shared_addr_format) == 64);
         assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->constant_addr_format) == 64);
         assert(nir_address_format_num_components(b->options->constant_addr_format) == 1);
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      b->mem_model = w[2];
      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;

   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      } else {
         return false; /* End of preamble. */
      }
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
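/* For reference: SPIR-V's logical module layout requires capabilities,
 * extensions, extended-instruction imports, the memory model, entry points,
 * execution modes, and debug/decoration instructions to appear before all
 * types, constants, and variables, which is why returning false on the
 * first unhandled opcode above is a safe way to detect the end of the
 * preamble.
 */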
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;
   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;
   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;
   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ: {
      unsigned execution_mode = 0;
      switch (mode->exec_mode) {
      case SpvExecutionModeDenormPreserve:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeDenormFlushToZero:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeSignedZeroInfNanPreserve:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeRoundingModeRTE:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeRoundingModeRTZ:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      default:
         break;
      }

      b->shader->info.float_controls_execution_mode |= execution_mode;
      break;
   }

   case SpvExecutionModeLocalSizeId:
   case SpvExecutionModeLocalSizeHintId:
      /* Handled later by vtn_handle_execution_mode_id(). */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}
static void
vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
                             const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHintId:
      /* Nothing to do with this hint. */
      break;

   default:
      /* Nothing to do.  Literal execution modes already handled by
       * vtn_handle_execution_mode().
       */
      break;
   }
}
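/* Illustrative example: "OpExecutionModeId %main LocalSizeId %cx %cy %cz"
 * names constant <id>s instead of literals, so it can only be resolved
 * here, after the constant instructions have been parsed; with %cx = 64
 * and %cy = %cz = 1, this yields a 64x1x1 local size.
 */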
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode in types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
   case SpvOpConstantSampler:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}
static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   vtn_push_ssa_value(b, w[2],
      vtn_nir_select(b, vtn_ssa_value(b, w[3]),
                        vtn_ssa_value(b, w[4]), vtn_ssa_value(b, w[5])));
}
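/* Note: vtn_nir_select() recurses over composite operands and emits one
 * nir_bcsel per vector/scalar leaf, passing the same condition down each
 * level; a vector condition is therefore only meaningful when the result
 * itself is a single vector of the same length, as validated above.
 */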
static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
   struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_get_nir_ssa(b, w[3]),
                                vtn_get_nir_ssa(b, w[4]),
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_get_nir_ssa(b, w[3]),
                               vtn_get_nir_ssa(b, w[4]),
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   vtn_push_nir_ssa(b, w[2], def);
}
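/* Illustrative example (assuming the natural size/align layout used above):
 * for two pointers into the same float array, the address subtraction
 * yields a byte delta; with elem_size = 4, a delta of 48 bytes becomes an
 * OpPtrDiff result of 12 elements, converted to the result type's bit size
 * by nir_i2i.
 */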
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
   case SpvOpImageQueryFormat:
   case SpvOpImageQueryOrder:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize: {
      struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
      vtn_assert(image_type->base_type == vtn_base_type_image);
      if (glsl_type_is_image(image_type->glsl_image)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrDiff:
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
      break;
   }

   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_get_type(b, w[1]);
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      vtn_push_nir_ssa(b, w[2], result);
      break;
   }

   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
static struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }
   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;
 fail:
   ralloc_free(b);
   return NULL;
}
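/* An illustrative header (example values, not a requirement):
 * words[0] = 0x07230203 (SpvMagicNumber), words[1] = 0x00010000
 * (SPIR-V 1.0), words[2] = generator magic with the tool id in the high
 * 16 bits (id 8 is glslang, per the workaround above), words[3] = the
 * <id> bound, words[4] = 0 (reserved schema).
 */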
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_uniform;
      in_var->data.read_only = true;
      in_var->data.location = i;
      if (param_type->base_type == vtn_base_type_image) {
         in_var->data.access = 0;
         if (param_type->access_qualifier & SpvAccessQualifierReadOnly)
            in_var->data.access |= ACCESS_NON_WRITEABLE;
         if (param_type->access_qualifier & SpvAccessQualifierWriteOnly)
            in_var->data.access |= ACCESS_NON_READABLE;
      }

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else if (param_type->base_type == vtn_base_type_image)
         in_var->type = param_type->glsl_image;
      else if (param_type->base_type == vtn_base_type_sampler)
         in_var->type = glsl_bare_sampler_type();
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else if (param_type->base_type == vtn_base_type_image ||
                 param_type->base_type == vtn_base_type_sampler) {
         /* Don't load the var, just pass a deref of it */
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}
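/* Illustrative example (hypothetical kernel, not from the source): for an
 * OpenCL kernel "void add(global float *a, float b)", the wrapper declares
 * two uniform input variables, loads them, and calls the real function;
 * had a parameter been a Function-storage pointer (a by-value struct), it
 * would first be copied into "copy_in" local storage and passed as a deref
 * instead.
 */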
5646 spirv_to_nir(const uint32_t *words
, size_t word_count
,
5647 struct nir_spirv_specialization
*spec
, unsigned num_spec
,
5648 gl_shader_stage stage
, const char *entry_point_name
,
5649 const struct spirv_to_nir_options
*options
,
5650 const nir_shader_compiler_options
*nir_options
)
5653 const uint32_t *word_end
= words
+ word_count
;
5655 struct vtn_builder
*b
= vtn_create_builder(words
, word_count
,
5656 stage
, entry_point_name
,
5662 /* See also _vtn_fail() */
5663 if (setjmp(b
->fail_jump
)) {
   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }
   /* Ensure a sane address mode is being used for function temps */
   assert(nir_address_format_bit_size(b->options->temp_addr_format) ==
          nir_get_ptr_bitsize(b->shader));
   assert(nir_address_format_num_components(b->options->temp_addr_format) == 1);
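   /* For instance, a single-component 32-bit format such as
    * nir_address_format_32bit_offset passes both checks on a shader with
    * 32-bit pointers.
    */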
   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse execution modes. */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;
   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes that depend on IDs. Must happen after we have
    * constants parsed.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode_id, NULL);
   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }
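   /* Background note: SPIR-V can fix the local size with a constant decorated
    * as the WorkgroupSize built-in, e.g. (hypothetical module)
    *
    *    OpDecorate %wgsize BuiltIn WorkgroupSize
    *    %wgsize = OpConstantComposite %v3uint %uint_64 %uint_1 %uint_1
    *
    * The three components of that constant are what gets copied into
    * shader->info.cs.local_size above.
    */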
   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;
   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);
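   /* The do/while above iterates to a fixed point: emitting one function body
    * can mark additional functions (reached via OpFunctionCall) as referenced,
    * so the list is re-scanned until no new function gets emitted.
    */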
   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);
   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
   /* structurize the CFG */
   nir_lower_goto_ifs(b->shader);

   entry_point->is_entrypoint = true;
   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);
   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);
   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;