/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/u_math.h"

#include <stdio.h>
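
/* Logging and error handling.  All diagnostics funnel through vtn_log():
 * messages are forwarded to the caller-provided callback in
 * b->options->debug (if any) and, in debug builds, warnings and errors are
 * also echoed to stderr.
 */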
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
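
/* Writes the raw SPIR-V words to <path>/<prefix>-<N>.spirv so a problematic
 * module can be inspected or replayed offline.  The static counter keeps
 * successive dumps from a single run from overwriting each other.
 */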
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   char filename[1024];
   static int idx = 0;

   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}
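
/* Unlike _vtn_warn() and _vtn_err(), _vtn_fail() does not return: after
 * logging (and optionally dumping the binary when MESA_SPIRV_FAIL_DUMP_PATH
 * is set) it longjmps to the setjmp point stored in b->fail_jump, unwinding
 * out of the entire SPIR-V parse.
 */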
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
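
/* A vtn_ssa_value mirrors the shape of its glsl_type: vectors and scalars
 * hold a single nir_ssa_def in ->def, while matrices, arrays, and structs
 * hold one vtn_ssa_value per element in ->elems.  The next two helpers
 * build such trees for undefined and constant values, respectively.
 */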
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         memcpy(load->value, constant->values,
                sizeof(nir_const_value) * load->def.num_components);

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
         const struct glsl_type *column_type = glsl_get_column_type(val->type);
         for (unsigned i = 0; i < columns; i++)
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                column_type);
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   return val;
}
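
/* Resolves a SPIR-V value id to an SSA value tree, materializing undefs and
 * constants on demand and lowering pointers to their SSA representation.
 */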
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
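
/* A SPIR-V instruction stream is a sequence of 32-bit words.  Word 0 of
 * each instruction packs the word count in the high 16 bits and the opcode
 * in the low 16 bits, e.g.:
 *
 *    w[0] = 0x00040015   =>   count = 4, opcode = 21 (OpTypeInt)
 *
 * vtn_foreach_instruction() walks [start, end) one instruction at a time,
 * maintaining OpLine/OpNoLine debug info and the current byte offset, and
 * stops early (returning the current word pointer) as soon as the handler
 * returns false.
 */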
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return NULL;
}
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
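
/* Decorations hang off each vtn_value as a singly-linked list.  The
 * dec->scope field encodes the target: VTN_DEC_DECORATION for the value
 * itself, VTN_DEC_STRUCT_MEMBER0 + n for struct member n, and
 * VTN_DEC_EXECUTION_MODE for execution modes.
 */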
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
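
/* Records every Op*Decorate instruction by prepending a vtn_decoration to
 * the target value's list.  Decoration groups are expanded lazily: group
 * members only store dec->group here, and _foreach_decoration_helper()
 * recurses into the group when the decorations are actually walked.
 */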
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
static bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
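
/* vtn_types can be shared between values, so decorating a single struct
 * member requires copy-on-write: the member (and, for arrays of matrices,
 * every array level down to the matrix itself) is shallow-copied before
 * being mutated.
 */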
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->operands[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable, just check that if it is not a
       * struct member, it should be a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
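
/* Maps a SPIR-V image format onto the GL internal-format enum that NIR
 * stores in image_format.  The values are written as raw hex with the GL
 * name in a trailing comment.
 */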
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
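
/* Builds a vtn_type (and its backing glsl_type) for each OpType*
 * instruction.  OpTypePointer and OpTypeForwardPointer are special-cased
 * below because a forward-declared pointer's value may already exist by
 * the time the defining OpTypePointer is reached.
 */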
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type;

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, false, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
*b
, struct vtn_value
*val
,
1565 uint32_t const_value
)
1567 struct spec_constant_value data
;
1568 data
.is_double
= false;
1569 data
.data32
= const_value
;
1570 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
, &data
);
1575 get_specialization64(struct vtn_builder
*b
, struct vtn_value
*val
,
1576 uint64_t const_value
)
1578 struct spec_constant_value data
;
1579 data
.is_double
= true;
1580 data
.data64
= const_value
;
1581 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
, &data
);
1586 handle_workgroup_size_decoration_cb(struct vtn_builder
*b
,
1587 struct vtn_value
*val
,
1588 ASSERTED
int member
,
1589 const struct vtn_decoration
*dec
,
1592 vtn_assert(member
== -1);
1593 if (dec
->decoration
!= SpvDecorationBuiltIn
||
1594 dec
->operands
[0] != SpvBuiltInWorkgroupSize
)
1597 vtn_assert(val
->type
->type
== glsl_vector_type(GLSL_TYPE_UINT
, 3));
1598 b
->workgroup_size_builtin
= val
;
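
/* Handles OpConstant*, OpSpecConstant*, and OpConstantNull.  Spec constants
 * first consult the driver-supplied specialization list (matched by the
 * SpecId decoration) and otherwise keep the default value encoded in the
 * module.
 */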
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].b = int_val != 0;
      break;
   }

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values[0].u32 = w[3];
         break;
      case 16:
         val->constant->values[0].u16 = w[3];
         break;
      case 8:
         val->constant->values[0].u8 = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         break;
      case 32:
         val->constant->values[0].u32 = get_specialization(b, val, w[3]);
         break;
      case 16:
         val->constant->values[0].u16 = get_specialization(b, val, w[3]);
         break;
      case 8:
         val->constant->values[0].u8 = get_specialization(b, val, w[3]);
         break;
      default:
         vtn_fail("Unsupported SpvOpSpecConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val->constant);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;

            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}
static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement could be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */

   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with a matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with a matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
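
/* Emit a single scoped_memory_barrier intrinsic carrying the NIR equivalents
 * of the SPIR-V scope and memory semantics.  Only used when the driver opts
 * into scoped barriers; see vtn_emit_memory_barrier below.
 */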
static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
   nir_intrinsic_set_memory_semantics(intrin, nir_semantics);

   nir_intrinsic_set_memory_modes(intrin, modes);
   nir_intrinsic_set_memory_scope(intrin, nir_scope);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
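
/* Recursively allocate a vtn_ssa_value tree for the given type: vectors and
 * scalars get a single def, while matrices, arrays, and structs get one
 * element per column, array element, or field.
 */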
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
         case GLSL_TYPE_INTERFACE:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
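
/* Build a nir_tex_src from the SSA value with the given SPIR-V result id. */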
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
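
/* Compute the index of the instruction word holding the argument for the
 * image operand "op".  For example, with w[mask_idx] == (Bias | Lod) and
 * op == Lod, the Bias bit contributes one preceding argument word, so the
 * Lod argument lives at w[mask_idx + 2].
 */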
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
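
/* Translate the texturing opcodes (OpSampledImage/OpImage plumbing, the
 * OpImageSample* family, fetches, gathers, and the image queries) into a
 * single nir_tex_instr.
 */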
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         vtn_push_value_pointer(b, w[2], src_val->sampled_image->image);
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         vtn_push_value_pointer(b, w[2], src_val->pointer);
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;

   struct vtn_pointer *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      image = sampled_val->sampled_image->image;
      sampler = sampled_val->sampled_image->sampler;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      image = sampled_val->pointer;
   }

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
   nir_deref_instr *sampler_deref =
      sampler ? vtn_pointer_to_deref(b, sampler) : NULL;

   const struct glsl_type *image_type = sampled_val->type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image_deref->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_SUBPASS_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         vtn_fail("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);

   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   if (image && (image->access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (sampler->access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image_type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, ret_type->type);
   ssa->def = &instr->dest.ssa;
   vtn_push_ssa(b, w[2], ret_type, ssa);

   nir_builder_instr_insert(&b->nb, &instr->instr);
}
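
/* Fill in the data sources shared by the different atomic lowering paths.
 * IIncrement, IDecrement, and ISub have no direct NIR equivalent and are
 * expressed as atomic_add of 1, -1, and a negated operand respectively.
 */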
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
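
/* Expand an image coordinate to the four components the image_load_store
 * intrinsics expect by repeating the last valid component, e.g. a vec2
 * coordinate (x, y) becomes (x, y, y, y).
 */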
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4);
}
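
/* Pad a value out to vec4 by repeating component 0 in the extra channels. */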
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
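
/* Translate OpImageTexelPointer, OpImageRead/Write, the image queries, and
 * the image atomics into the corresponding image_deref_* intrinsics, wrapped
 * in whatever barriers the memory semantics imply.
 */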
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;

   case SpvOpImageRead: {
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   case SpvOpImageWrite: {
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   nir_intrinsic_set_access(intrin, image.image->access);

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
         /* Only OpImageRead can support a lod parameter if
          * SPV_AMD_shader_image_load_store_lod is used but the current NIR
          * intrinsics definition for atomics requires us to set it for
          * OpAtomicLoad as well.
          */
         intrin->src[3] = nir_src_for_ssa(image.lod);
      }
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
      /* Only OpImageWrite can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * OpAtomicStore as well.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components = glsl_get_vector_elements(type->type);
      intrin->num_components = nir_intrinsic_infos[op].dest_components;
      if (intrin->num_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        intrin->num_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (intrin->num_components != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      struct vtn_value *val =
         vtn_push_ssa(b, w[2], type, vtn_create_ssa_value(b, type->type));
      val->ssa->def = result;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
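
/* The next three helpers pick the NIR intrinsic for a SPIR-V atomic,
 * depending on whether the pointer is lowered to an SSBO index/offset pair,
 * an atomic counter uniform, or a plain deref.
 */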
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:         return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:        return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and
       * AtomicSMax.  Right now there are no NIR intrinsics for them.  At
       * this moment Atomic Counter support is only needed for ARB_spirv,
       * so we only need to support GLSL Atomic Counters, which are uints
       * and don't allow direct storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}
static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:         return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:        return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}
/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }

   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_uniform) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset. In this case we don't need to,
       * as that info is already stored on the ptr->var->var nir_variable (see
       * vtn_create_variable)
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here, as
          * atomic counter uniforms don't have sources
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }

   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_ssa_value *ssa = rzalloc(b, struct vtn_ssa_value);
      ssa->def = &atomic->dest.ssa;
      ssa->type = type->type;
      vtn_push_ssa(b, w[2], type, ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
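
/* Allocate (but do not insert) a nir_op_vecN instruction wide enough for
 * num_components channels of the given bit size.
 */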
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   return nir_channel(&b->nb, src, index);
}
static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static nir_ssa_def *
nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i)
{
   return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size));
}
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32));
}
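
/* Insert at a non-constant index by generating one vtn_vector_insert per
 * component and selecting the matching result with a chain of bcsels.
 */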
static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
*
3384 vtn_vector_shuffle(struct vtn_builder
*b
, unsigned num_components
,
3385 nir_ssa_def
*src0
, nir_ssa_def
*src1
,
3386 const uint32_t *indices
)
3388 nir_alu_instr
*vec
= create_vec(b
, num_components
, src0
->bit_size
);
3390 for (unsigned i
= 0; i
< num_components
; i
++) {
3391 uint32_t index
= indices
[i
];
3392 if (index
== 0xffffffff) {
3394 nir_src_for_ssa(nir_ssa_undef(&b
->nb
, 1, src0
->bit_size
));
3395 } else if (index
< src0
->num_components
) {
3396 vec
->src
[i
].src
= nir_src_for_ssa(src0
);
3397 vec
->src
[i
].swizzle
[0] = index
;
3399 vec
->src
[i
].src
= nir_src_for_ssa(src1
);
3400 vec
->src
[i
].swizzle
[0] = index
- src0
->num_components
;
3404 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
3406 return &vec
->dest
.dest
.ssa
;
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
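
/* Make a structural copy of a composite value.  Leaf nir_ssa_defs are shared
 * rather than copied since SSA values are immutable.
 */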
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */

      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */

         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      }

      cur = cur->elems[indices[i]];
   }

   return cur;
}
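
/* Translate the composite opcodes (extract/insert, shuffle, construct, and
 * object copies) into operations on vtn_ssa_values.
 */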
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                            vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                           vtn_ssa_value(b, w[4])->def,
                                           vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_ssa_value(b, w[3])->def,
                                    vtn_ssa_value(b, w[4])->def,
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      assume(elems >= 1);
      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa(b, w[2], type, ssa);
}
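
/* Emit a zero-operand barrier intrinsic. */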
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->options->use_scoped_memory_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   if ((semantics & all_memory_semantics) == all_memory_semantics) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      return;
   }

   /* Issue a bunch of more specific barriers */
   uint32_t bits = semantics;
   while (bits) {
      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
      switch (semantic) {
      case SpvMemorySemanticsUniformMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
         break;
      case SpvMemorySemanticsWorkgroupMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
         break;
      case SpvMemorySemanticsAtomicCounterMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
         break;
      case SpvMemorySemanticsImageMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
         break;
      case SpvMemorySemanticsOutputMemoryMask:
         if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
            vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         break;
      default:
         break;
      }
   }
}
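
/* Handle the barrier-like opcodes: geometry stream vertex/primitive ops,
 * OpMemoryBarrier, and OpControlBarrier (including workarounds for old
 * GLSLang output).
 */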
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }

      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
       * memory semantics of None for GLSL barrier().
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          execution_scope == SpvScopeWorkgroup &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class: Writes to Output
       *    variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

      if (execution_scope == SpvScopeWorkgroup)
         vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}
#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)
static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}
3852 vtn_handle_preamble_instruction(struct vtn_builder
*b
, SpvOp opcode
,
3853 const uint32_t *w
, unsigned count
)
3860 case SpvSourceLanguageUnknown
: lang
= "unknown"; break;
3861 case SpvSourceLanguageESSL
: lang
= "ESSL"; break;
3862 case SpvSourceLanguageGLSL
: lang
= "GLSL"; break;
3863 case SpvSourceLanguageOpenCL_C
: lang
= "OpenCL C"; break;
3864 case SpvSourceLanguageOpenCL_CPP
: lang
= "OpenCL C++"; break;
3865 case SpvSourceLanguageHLSL
: lang
= "HLSL"; break;
3868 uint32_t version
= w
[2];
3871 (count
> 3) ? vtn_value(b
, w
[3], vtn_value_type_string
)->str
: "";
3873 vtn_info("Parsing SPIR-V from %s %u source file %s", lang
, version
, file
);
3877 case SpvOpSourceExtension
:
3878 case SpvOpSourceContinued
:
3879 case SpvOpExtension
:
3880 case SpvOpModuleProcessed
:
3881 /* Unhandled, but these are for debug so that's ok. */
   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;

      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;

      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }
   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;
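
   /* The addressing model decides whether pointers stay abstract (Logical)
    * or become real integer addresses (Physical*).  For the physical models
    * every pointer-bearing storage class is lowered to the same global
    * address format.
    */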
   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_32bit_global;
         b->options->global_addr_format = nir_address_format_32bit_global;
         b->options->temp_addr_format = nir_address_format_32bit_global;
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_64bit_global;
         b->options->global_addr_format = nir_address_format_64bit_global;
         b->options->temp_addr_format = nir_address_format_64bit_global;
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;
   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      } else {
         return false; /* End of preamble. */
      }
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
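
/* Run once per execution mode on the entry point (via
 * vtn_foreach_execution_mode); each mode is recorded straight into the NIR
 * shader_info.
 */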
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHint:
   case SpvExecutionModeLocalSizeHintId:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ:
      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}
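
/* Float-controls modes are folded into a shader-wide bitmask.  This pass
 * runs before constants are handled because the selected rounding mode can
 * change how floating point constants are evaluated.
 */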
static void
vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                                           const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   unsigned execution_mode = 0;

   switch(mode->exec_mode) {
   case SpvExecutionModeDenormPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeDenormFlushToZero:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeSignedZeroInfNanPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTE:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTZ:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   default:
      break;
   }

   b->shader->info.float_controls_execution_mode |= execution_mode;
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
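
/* OpSelect on a composite applies the single boolean (or per-component
 * vector) condition to every element, so recurse through the aggregate until
 * we reach vectors and scalars that nir_bcsel can handle directly.
 */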
static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}
static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_nir_select(b,
      vtn_ssa_value(b, w[3]), vtn_ssa_value(b, w[4]), vtn_ssa_value(b, w[5]));

   vtn_push_ssa(b, w[2], res_type, ssa);
}
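
/* OpPtrEqual/OpPtrNotEqual/OpPtrDiff operate on the lowered address
 * representation of the operands' storage class, so the nir_build_addr_*
 * helpers below are given the matching nir_address_format.
 */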
static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
   struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type =
      vtn_value(b, w[1], vtn_value_type_type)->type;
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_ssa_value(b, w[3])->def,
                                vtn_ssa_value(b, w[4])->def,
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_ssa_value(b, w[3])->def,
                               vtn_ssa_value(b, w[4])->def,
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   struct vtn_ssa_value *ssa_value = vtn_create_ssa_value(b, type);
   ssa_value->def = def;
   vtn_push_ssa(b, w[2], vtn_type, ssa_value);
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (glsl_type_is_image(image->type->type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image->type->type));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrDiff:
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, res_type->type);
      val->def = &intrin->dest.ssa;

      vtn_push_ssa(b, w[2], res_type, val);
      break;
   }

   case SpvOpReadClockKHR: {
      assert(vtn_constant_uint(b, w[3]) == SpvScopeSubgroup);

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_create_ssa_value(b, dest_type);
      val->ssa->def = result;
      break;
   }

   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
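
/* The builder owns everything it allocates: the options are copied onto the
 * builder's ralloc context, so the caller's copy may go away, and freeing
 * the builder frees the lot.
 */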
struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtx_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }
   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* The first GLSLang version bump actually came 1.5 years after #179 was
    * fixed, but this should at least let us shut the workaround off for
    * modern versions of GLSLang.
    */
   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;

 fail:
   ralloc_free(b);
   return NULL;
}
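
/* NIR entry points cannot take parameters, but OpenCL kernel entry points
 * can.  Wrap the kernel in a generated main that sources each parameter from
 * a read-only shader_in variable and then calls the real function.
 */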
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_shader_in;
      in_var->data.read_only = true;
      in_var->data.location = i;

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}
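
/* Top-level entry point: translates a complete SPIR-V module into a freshly
 * allocated nir_shader, or returns NULL on any parse failure.
 */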
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse rounding mode execution modes. This has to happen earlier than
    * other changes in the execution modes since they can affect, for example,
    * the result of the floating point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_constant_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}