/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

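/*
 * Illustrative sketch (not part of this file): a caller of spirv_to_nir()
 * can route the messages emitted by vtn_log() above by filling in the debug
 * callback in its options.  The callback name and values below are
 * hypothetical; only the options->debug fields actually read by vtn_log()
 * are assumed.
 *
 *    static void
 *    my_spirv_debug(void *private_data, enum nir_spirv_debug_level level,
 *                   size_t spirv_offset, const char *message)
 *    {
 *       fprintf(stderr, "[spirv+%zu] %s\n", spirv_offset, message);
 *    }
 *
 *    struct spirv_to_nir_options options = {
 *       .debug = { .func = my_spirv_debug, .private_data = NULL },
 *    };
 */
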
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}

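/*
 * Usage note: because _vtn_fail() consults MESA_SPIRV_FAIL_DUMP_PATH before
 * longjmp-ing out, a failing module can be captured for offline analysis,
 * e.g. (hypothetical invocation):
 *
 *    MESA_SPIRV_FAIL_DUMP_PATH=/tmp/spirv ./my_vulkan_app
 *
 * Together with vtn_dump_shader() above this produces files such as
 * /tmp/spirv/fail-0.spirv that can be disassembled with spirv-dis.
 */
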
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         memcpy(load->value, constant->values,
                sizeof(nir_const_value) * load->def.num_components);

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
         const struct glsl_type *column_type = glsl_get_column_type(val->type);
         for (unsigned i = 0; i < columns; i++)
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                column_type);
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }

   return dup;
}

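/*
 * Worked example of the word accounting above: the literal "abcd" needs
 * five bytes including its NUL terminator, so it spills into a second
 * 32-bit word:
 *
 *    len         = strlen("abcd") + 1 = 5
 *    *words_used = DIV_ROUND_UP(5, 4) = 2
 *
 * whereas "abc" fits exactly in one word ('a', 'b', 'c', '\0').
 */
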
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}

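/*
 * Worked example of the instruction header decoded above: for
 * "OpName %t \"abc\"" the packed string takes one word, so the header is
 *
 *    w[0] = (3 << SpvWordCountShift) | SpvOpName = 0x00030005
 *
 * giving opcode = w[0] & SpvOpCodeMask = 5 (OpName) and count = 3, i.e. the
 * instruction occupies w[0..2] and the next one starts at w + 3.
 */
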
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}

static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

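/*
 * Worked example: for "OpMemberDecorate %S 1 Offset 16", target is %S, the
 * member literal makes dec->scope = VTN_DEC_STRUCT_MEMBER0 + 1,
 * dec->decoration is SpvDecorationOffset, and dec->operands points at the
 * remaining literal word holding 16.
 */
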
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}

static struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}

static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}

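/*
 * Worked example: for a column-major matrix member declared with
 * "OpMemberDecorate %S 0 MatrixStride 16", the else-branch above stores 16
 * as the distance between columns (mat_type->stride).  Had the member also
 * been decorated RowMajor, the 16 would instead land on
 * mat_type->array_element->stride, the distance between rows.
 */
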
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable, just check that if it is not a
       * struct member, it should be a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type;

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, false, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}

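/*
 * Illustrative sketch of the forward-declaration handling above: a module
 * may contain
 *
 *    OpTypeForwardPointer %p PhysicalStorageBuffer
 *    %S = OpTypeStruct %p %uint
 *    %p = OpTypePointer PhysicalStorageBuffer %S
 *
 * The OpTypeForwardPointer creates the vtn_type with only id, base_type and
 * storage_class set; the later OpTypePointer for the same id fills in deref,
 * and the two storage classes are required to match.
 */
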
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         break;
      }
   }
}

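/*
 * Illustrative sketch: the specializations consulted above are supplied by
 * the caller of spirv_to_nir() (the values below are hypothetical):
 *
 *    struct nir_spirv_specialization spec[] = {
 *       { .id = 7, .value = { .u32 = 64 } },
 *    };
 *
 * A constant decorated "OpDecorate %c SpecId 7" then takes the value 64
 * instead of the default literal in the module.
 */
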
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}

static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      bool bval = (opcode == SpvOpConstantTrue ||
                   opcode == SpvOpSpecConstantTrue);

      nir_const_value u32val = nir_const_value_for_uint(bval, 32);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);

      val->constant->values[0].b = u32val.u32 != 0;
      break;
   }

   case SpvOpConstant:
   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values[0].u32 = w[3];
         break;
      case 16:
         val->constant->values[0].u16 = w[3];
         break;
      case 8:
         val->constant->values[0].u8 = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
      }

      if (opcode == SpvOpSpecConstant)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
                                &val->constant->values[0]);
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         };

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;
            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case 8:  src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

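/*
 * Worked example for the SpvOpSpecConstantOp path above: given
 *
 *    %c = OpSpecConstantOp %uint IAdd %a %b
 *
 * w[3] holds the nested opcode (IAdd, possibly itself specialized via
 * SpecId), vtn_nir_alu_op_for_spirv_opcode() maps it to nir_op_iadd, the
 * constituent constants are gathered into src[], and the whole thing is
 * folded at parse time by nir_eval_const_opcode().
 */
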
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}


static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement could be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */
   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
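
/* Worked example (illustrative, not from the original source): an
 * OpAtomicIAdd carrying AcquireRelease | UniformMemory semantics is split by
 * the helper above into
 *
 *    *before = Release | UniformMemory   (barrier emitted before the atomic)
 *    *after  = Acquire | UniformMemory   (barrier emitted after the atomic)
 *
 * which is a conservative but correct lowering of the combined semantics.
 */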

static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
   nir_intrinsic_set_memory_semantics(intrin, nir_semantics);

   nir_intrinsic_set_memory_modes(intrin, modes);
   nir_intrinsic_set_memory_scope(intrin, nir_scope);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
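
/* Example mapping (illustrative, not from the original source): a barrier
 * with Workgroup scope and AcquireRelease | WorkgroupMemory semantics becomes
 * a single scoped_memory_barrier intrinsic with
 *    semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,
 *    modes     = nir_var_mem_shared,
 *    scope     = NIR_SCOPE_WORKGROUP.
 */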

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
         case GLSL_TYPE_INTERFACE:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
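
/* Worked example (illustrative, not from the original source): with
 * w[mask_idx] = Bias | Lod | Offset and op = Offset, the lower-valued
 * single-argument bits are Bias and Lod, so the bitcount is 2 and the Offset
 * argument lives at w[mask_idx + 3]: one word past the mask itself plus the
 * two preceding arguments.
 */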

static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         vtn_push_value_pointer(b, w[2], src_val->sampled_image->image);
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         vtn_push_value_pointer(b, w[2], src_val->pointer);
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;

   struct vtn_pointer *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      image = sampled_val->sampled_image->image;
      sampler = sampled_val->sampled_image->sampler;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      image = sampled_val->pointer;
   }

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
   nir_deref_instr *sampler_deref =
      sampler ? vtn_pointer_to_deref(b, sampler) : NULL;

   const struct glsl_type *image_type = sampled_val->type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image_deref->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      /* These operations require a sampler */
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't use a sampler */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);

   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   if (image && (image->access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (sampler->access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image_type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, ret_type->type);
   ssa->def = &instr->dest.ssa;
   vtn_push_ssa(b, w[2], ret_type, ssa);

   nir_builder_instr_insert(&b->nb, &instr->instr);
}

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4);
}
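
/* For example (illustrative, not from the original source): a 2D coordinate
 * (x, y) is swizzled to (x, y, y, y); the intrinsic ignores the padding
 * components but expects a full vec4 source.
 */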

static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
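
/* For example (illustrative, not from the original source): a vec2 (a, b)
 * expands to (a, b, a, a), since components beyond num_components are padded
 * with component 0.
 */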

static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;

   case SpvOpImageRead: {
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   case SpvOpImageWrite: {
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_ssa_value(b, w[arg])->def;
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   nir_intrinsic_set_access(intrin, image.image->access);

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
         /* Only OpImageRead can support a lod parameter if
          * SPV_AMD_shader_image_load_store_lod is used but the current NIR
          * intrinsics definition for atomics requires us to set it for
          * OpAtomicLoad.
          */
         intrin->src[3] = nir_src_for_ssa(image.lod);
      }
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
      /* Only OpImageWrite can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * OpAtomicStore.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components = glsl_get_vector_elements(type->type);
      intrin->num_components = nir_intrinsic_infos[op].dest_components;
      if (intrin->num_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        intrin->num_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (intrin->num_components != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      struct vtn_value *val =
         vtn_push_ssa(b, w[2], type, vtn_create_ssa_value(b, type->type));
      val->ssa->def = result;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}

static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}

static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and
       * AtomicSMax.  Right now there are no NIR intrinsics for them.  At this
       * moment Atomic Counter support is only needed for ARB_spirv support,
       * so we only need to support GLSL Atomic Counters, which are uints and
       * don't allow direct storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}

static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:     return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}

/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }

   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_uniform) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset.  In this case we don't need
       * to, as that info is already stored on the ptr->var->var nir_variable
       * (see vtn_create_variable).
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here, as
          * atomic counter uniforms don't have sources
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }

   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_ssa_value *ssa = rzalloc(b, struct vtn_ssa_value);
      ssa->def = &atomic->dest.ssa;
      ssa->type = type->type;
      vtn_push_ssa(b, w[2], type, ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
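
/* Illustrative example (not from the original source): an OpAtomicLoad
 * through a Workgroup pointer with Acquire semantics has WorkgroupMemory
 * folded in above, so the Acquire barrier emitted after the load also orders
 * shared-memory accesses.
 */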

static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}

struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}

static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
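
/* Example (illustrative, not from the original source): shuffling vec2
 * src0 = (a, b) with vec2 src1 = (c, d) using indices (3, 0xffffffff) yields
 * (d, undef); indices below src0->num_components select from src0, higher
 * ones from src1, and 0xffffffff marks an undefined component per
 * OpVectorShuffle.
 */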

/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
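
/* Example (illustrative, not from the original source): building a vec4 from
 * (vec2 xy, float z, float w) emits vec4(xy.x, xy.y, z, w); each constituent
 * contributes its components in order until dest_idx reaches num_components.
 */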

static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      /* If we got a vector here, that means the next index will be trying to
       * dereference a scalar.
       */
      vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
                  "OpCompositeInsert has too many indices.");
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");

      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */

      cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
   } else {
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
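
/* Example (illustrative, not from the original source): for a
 * struct { vec4 v; } source, indices (0, 2) walk to the vec4 member and then
 * insert the scalar at component 2 via nir_vector_insert_imm; only the
 * copied tree is modified, so the original composite value stays intact.
 */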

static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");

         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the component to extract from the vector.
          */

         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = nir_channel(&b->nb, cur->def, indices[i]);
         return ret;
      } else {
         vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}

static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = nir_vector_extract(&b->nb, vtn_ssa_value(b, w[3])->def,
                                    vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = nir_vector_insert(&b->nb, vtn_ssa_value(b, w[3])->def,
                                   vtn_ssa_value(b, w[4])->def,
                                   vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_ssa_value(b, w[3])->def,
                                    vtn_ssa_value(b, w[4])->def,
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      assume(elems >= 1);
      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;
   case SpvOpCopyObject:
      vtn_copy_value(b, w[3], w[2]);
      return;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa(b, w[2], type, ssa);
}

static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->shader->options->use_scoped_memory_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask |
      SpvMemorySemanticsOutputMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   /* Map the GLSL memoryBarrier() construct to the corresponding NIR one. */
   static const SpvMemorySemanticsMask glsl_memory_barrier =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsImageMemoryMask;
   if ((semantics & glsl_memory_barrier) == glsl_memory_barrier) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      semantics &= ~(glsl_memory_barrier | SpvMemorySemanticsAtomicCounterMemoryMask);
   }

   /* Issue a more specific barrier for each remaining semantic bit */
   uint32_t bits = semantics;
   while (bits) {
      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
      switch (semantic) {
      case SpvMemorySemanticsUniformMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
         break;
      case SpvMemorySemanticsWorkgroupMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
         break;
      case SpvMemorySemanticsAtomicCounterMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
         break;
      case SpvMemorySemanticsImageMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
         break;
      case SpvMemorySemanticsOutputMemoryMask:
         if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
            vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         break;
      default:
         break;
      }
   }
}
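
/* Example mapping (illustrative, not from the original source): GLSL
 * memoryBarrierShared() typically arrives as Device scope with
 * AcquireRelease | WorkgroupMemory semantics and lowers to a single
 * nir_intrinsic_memory_barrier_shared, while a full memoryBarrier() (with the
 * uniform, workgroup, and image bits all set) takes the combined
 * nir_intrinsic_memory_barrier path above.
 */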

static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }

      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
       * memory semantics of None for GLSL barrier().
       * And before that, prior to c3f1cdfa, emitted the OpControlBarrier with
       * Device instead of Workgroup for the execution scope.
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          (execution_scope == SpvScopeWorkgroup ||
           execution_scope == SpvScopeDevice) &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         execution_scope = SpvScopeWorkgroup;
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class:  Writes to Output
       *    variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

      if (execution_scope == SpvScopeWorkgroup)
         vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
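
/* Illustrative effect of the workaround above (not from the original
 * source): a GLSL barrier() compiled by an old GLSLang arrives as
 * OpControlBarrier(Workgroup, Workgroup, None) and is rewritten to carry
 * AcquireRelease | WorkgroupMemory semantics, so the control barrier still
 * pairs with the shared-memory ordering the shader author expected.
 */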

static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}

#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)

static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}
3769 vtn_handle_preamble_instruction(struct vtn_builder
*b
, SpvOp opcode
,
3770 const uint32_t *w
, unsigned count
)
3777 case SpvSourceLanguageUnknown
: lang
= "unknown"; break;
3778 case SpvSourceLanguageESSL
: lang
= "ESSL"; break;
3779 case SpvSourceLanguageGLSL
: lang
= "GLSL"; break;
3780 case SpvSourceLanguageOpenCL_C
: lang
= "OpenCL C"; break;
3781 case SpvSourceLanguageOpenCL_CPP
: lang
= "OpenCL C++"; break;
3782 case SpvSourceLanguageHLSL
: lang
= "HLSL"; break;
3785 uint32_t version
= w
[2];
3788 (count
> 3) ? vtn_value(b
, w
[3], vtn_value_type_string
)->str
: "";
3790 vtn_info("Parsing SPIR-V from %s %u source file %s", lang
, version
, file
);
3794 case SpvOpSourceExtension
:
3795 case SpvOpSourceContinued
:
3796 case SpvOpExtension
:
3797 case SpvOpModuleProcessed
:
3798 /* Unhandled, but these are for debug so that's ok. */
   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;
      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;
      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      case SpvCapabilityImageGatherBiasLodAMD:
         spv_check_supported(amd_image_gather_bias_lod, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }
   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;
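
   /* OpMemoryModel carries two operands: w[1] is the addressing model,
    * which determines pointer size and address formats, and w[2] is the
    * memory model itself.
    */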
   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_32bit_global;
         b->options->global_addr_format = nir_address_format_32bit_global;
         b->options->temp_addr_format = nir_address_format_32bit_global;
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_64bit_global;
         b->options->global_addr_format = nir_address_format_64bit_global;
         b->options->temp_addr_format = nir_address_format_64bit_global;
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
      }

      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
      }
      break;
   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      }
      return false; /* End of preamble. */
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
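
/* Applied to every execution mode attached to the entry point; translates
 * each mode into the corresponding shader_info field.  Called via
 * vtn_foreach_execution_mode() from spirv_to_nir() below.
 */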
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHint:
   case SpvExecutionModeLocalSizeHintId:
      break; /* Nothing to do with this */
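
   /* For tessellation stages, OutputVertices is the number of vertices in
    * the output patch; for geometry shaders it is the maximum number of
    * vertices emitted.
    */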
   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ:
      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}
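
/* Only handles the float-controls execution modes (denorm handling,
 * signed-zero/inf/nan preservation, and rounding).  These run before
 * constants are parsed, since the selected rounding mode can change the
 * value of floating-point constants; everything else is left to
 * vtn_handle_execution_mode() above.
 */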
static void
vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                                           const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   unsigned execution_mode = 0;

   switch (mode->exec_mode) {
   case SpvExecutionModeDenormPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeDenormFlushToZero:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeSignedZeroInfNanPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTE:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTZ:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   default:
      break;
   }

   b->shader->info.float_controls_execution_mode |= execution_mode;
}
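
/* Second walk over the module: types, constants, and module-level
 * variables.  Anything that was legal in the preamble (capabilities,
 * decorations, and so on) is now an error, and the first unrecognized
 * opcode terminates the walk.
 */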
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
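
/* Builds a select recursively: vectors and scalars map directly to
 * nir_bcsel, while composites (structs, arrays, matrices) select each
 * element with the same condition.
 */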
static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}
static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_nir_select(b,
      vtn_ssa_value(b, w[3]), vtn_ssa_value(b, w[4]), vtn_ssa_value(b, w[5]));

   vtn_push_ssa(b, w[2], res_type, ssa);
}
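
/* OpPtrEqual, OpPtrNotEqual, and OpPtrDiff operate on the underlying
 * address representation, so the arithmetic below depends on the address
 * format of the pointers' storage class.
 */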
static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
   struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type =
      vtn_value(b, w[1], vtn_value_type_type)->type;
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_ssa_value(b, w[3])->def,
                                vtn_ssa_value(b, w[4])->def,
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_ssa_value(b, w[3])->def,
                               vtn_ssa_value(b, w[4])->def,
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   struct vtn_ssa_value *ssa_value = vtn_create_ssa_value(b, type);
   ssa_value->def = def;
   vtn_push_ssa(b, w[2], vtn_type, ssa_value);
}
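
/* Per-instruction dispatch for function bodies.  Structured control flow
 * is handled by the CFG pre-pass, so the merge instructions that still
 * show up here are simply skipped.
 */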
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;
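
   /* Sampling and query opcodes go to the texture path; OpImageQuerySize is
    * special-cased below because it applies to both storage images and
    * sampled images.
    */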
   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (glsl_type_is_image(image->type->type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image->type->type));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;
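
   /* Atomics can target either an image texel pointer or a regular memory
    * pointer, so dispatch on the value type of the pointer operand.  Note
    * that OpAtomicStore takes its pointer in w[1] while the others take it
    * in w[3].
    */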
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;
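
   /* Everything from here down to the vtn_handle_alu() call is a plain ALU
    * operation on SSA values.
    */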
   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSMod:
   case SpvOpFMod:
   case SpvOpSRem:
   case SpvOpFRem:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;
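
   /* Subgroup operations, covering the core SPIR-V 1.3 group-non-uniform
    * opcodes as well as the older KHR and AMD ballot/reduction extensions.
    */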
   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrEqual:
   case SpvOpPtrDiff:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }
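
   /* OpIsHelperInvocationEXT is lowered to a dedicated intrinsic rather
    * than a plain system-value read; since OpDemoteToHelperInvocationEXT
    * can flip an invocation to helper mid-shader, the value has to be
    * re-evaluated at its point of use.
    */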
   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, res_type->type);
      val->def = &intrin->dest.ssa;

      vtn_push_ssa(b, w[2], res_type, val);
      break;
   }

   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->type = type;
      val->ssa = vtn_create_ssa_value(b, dest_type);
      val->ssa->def = result;
      break;
   }

   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
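
/* Allocates the builder and validates the five-word SPIR-V header: magic
 * number, version, generator id/version (used to enable workarounds for
 * old GLSLang), the value ID bound, and the reserved zero word.
 */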
struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }

   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* The first GLSLang version bump came roughly 1.5 years after #179 was
    * fixed, but this should at least let us shut the workaround off for
    * modern versions of GLSLang.
    */
   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;

 fail:
   ralloc_free(b);
   return NULL;
}
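
/* OpenCL kernel entry points can take parameters directly, but NIR entry
 * points cannot.  Wrap the kernel in a __wrapped_* function that declares
 * one shader input per parameter, loads or copies each one, and then calls
 * the real entry point.
 */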
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_shader_in;
      in_var->data.read_only = true;
      in_var->data.location = i;

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}
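
/* Top-level entry point: runs the preamble, type/constant/variable, and
 * execution-mode passes, builds the CFG, emits every referenced function,
 * and returns the finished nir_shader (or NULL on failure).
 */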
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse rounding mode execution modes. This has to happen earlier than
    * other changes in the execution modes since they can affect, for example,
    * the result of the floating point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}