/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/u_math.h"

#include <stdio.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
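/* Example (illustrative sketch, not part of this file): a consumer can
 * receive these messages by filling in the debug hook on the options struct
 * this file reads through b->options->debug.func/private_data.  The callback
 * shape below is inferred from the call above.
 *
 *    static void
 *    my_spirv_debug(void *private_data, enum nir_spirv_debug_level level,
 *                   size_t spirv_offset, const char *message)
 *    {
 *       fprintf(stderr, "[spirv @ %zu] %s\n", spirv_offset, message);
 *    }
 *
 *    ...
 *    options.debug.func = my_spirv_debug;
 *    options.debug.private_data = NULL;
 */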
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
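/* Usage note (sketch): because _vtn_fail() longjmps back to the setjmp
 * point stored in b->fail_jump, a failed parse never returns to the caller
 * normally.  Setting the environment variable makes the failing binary easy
 * to capture, e.g.:
 *
 *    MESA_SPIRV_FAIL_DUMP_PATH=/tmp/spirv-dumps ./my_vulkan_app
 *
 * which writes /tmp/spirv-dumps/fail-<N>.spirv via vtn_dump_shader() above.
 */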
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         memcpy(load->value, constant->values,
                sizeof(nir_const_value) * load->def.num_components);

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
         const struct glsl_type *column_type = glsl_get_column_type(val->type);
         for (unsigned i = 0; i < columns; i++)
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                column_type);
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
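/* Worked example (sketch): the SPIR-V string "abc" occupies one 32-bit word
 * ('a','b','c','\0'), so len = 4 and *words_used = DIV_ROUND_UP(4, 4) = 1.
 * A 4-character string needs a second word for its terminating NUL:
 * len = 5, *words_used = 2.
 */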
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return NULL;
}
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
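/* Example callback (illustrative sketch): a vtn_decoration_foreach_cb has
 * the same shape as the static *_decoration_cb helpers in this file.
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       unsigned *count = data;   // hypothetical user payload
 *       if (member == -1)         // -1 means "not a struct member"
 *          (*count)++;
 *    }
 *
 *    unsigned n = 0;
 *    vtn_foreach_decoration(b, value, count_decorations_cb, &n);
 */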
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->operands[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
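/* Worked example (sketch): for a column-major mat3 member decorated
 * "OpMemberDecorate %S 0 MatrixStride 16", each of the three vec3 columns
 * starts 16 bytes apart (std140-style padding), so mat_type->stride becomes
 * 16 even though a packed vec3 column is only 12 bytes.  In the row-major
 * case the 16 instead becomes the stride between rows, stored on the
 * (copied) array_element.
 */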
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable; just check that if it is
       * not a struct member, it should be a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
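/* Note (sketch): the returned values are raw GL enum numbers
 * (e.g. 0x8814 == GL_RGBA32F) rather than a Mesa-internal format enum, so
 * SpvImageFormatUnknown maps to 0 (GL_NONE) and any unlisted format is a
 * hard parse failure above.
 */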
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type;

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBufferEXT:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                          glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       *       The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, false, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}
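/* Context (sketch): the WorkgroupSize built-in is a uvec3 *constant*, not a
 * variable, so it is recorded here while constants are parsed; later stages
 * can then override the shader's declared local size with these three
 * values.
 */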
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].b = int_val != 0;
      break;
   }

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values[0].u32 = w[3];
         break;
      case 16:
         val->constant->values[0].u16 = w[3];
         break;
      case 8:
         val->constant->values[0].u8 = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         break;
      case 32:
         val->constant->values[0].u32 = get_specialization(b, val, w[3]);
         break;
      case 16:
         val->constant->values[0].u16 = get_specialization(b, val, w[3]);
         break;
      case 8:
         val->constant->values[0].u8 = get_specialization(b, val, w[3]);
         break;
      default:
         vtn_fail("Unsupported SpvOpSpecConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         };

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;
            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBufferEXT:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}
1939 vtn_split_barrier_semantics(struct vtn_builder
*b
,
1940 SpvMemorySemanticsMask semantics
,
1941 SpvMemorySemanticsMask
*before
,
1942 SpvMemorySemanticsMask
*after
)
1944 /* For memory semantics embedded in operations, we split them into up to
1945 * two barriers, to be added before and after the operation. This is less
1946 * strict than if we propagated until the final backend stage, but still
1947 * result in correct execution.
1949 * A further improvement could be pipe this information (and use!) into the
1950 * next compiler layers, at the expense of making the handling of barriers
1954 *before
= SpvMemorySemanticsMaskNone
;
1955 *after
= SpvMemorySemanticsMaskNone
;
1957 const SpvMemorySemanticsMask order_semantics
=
1958 semantics
& (SpvMemorySemanticsAcquireMask
|
1959 SpvMemorySemanticsReleaseMask
|
1960 SpvMemorySemanticsAcquireReleaseMask
|
1961 SpvMemorySemanticsSequentiallyConsistentMask
);
1963 const SpvMemorySemanticsMask av_vis_semantics
=
1964 semantics
& (SpvMemorySemanticsMakeAvailableMask
|
1965 SpvMemorySemanticsMakeVisibleMask
);
1967 const SpvMemorySemanticsMask storage_semantics
=
1968 semantics
& (SpvMemorySemanticsUniformMemoryMask
|
1969 SpvMemorySemanticsSubgroupMemoryMask
|
1970 SpvMemorySemanticsWorkgroupMemoryMask
|
1971 SpvMemorySemanticsCrossWorkgroupMemoryMask
|
1972 SpvMemorySemanticsAtomicCounterMemoryMask
|
1973 SpvMemorySemanticsImageMemoryMask
|
1974 SpvMemorySemanticsOutputMemoryMask
);
1976 const SpvMemorySemanticsMask other_semantics
=
1977 semantics
& ~(order_semantics
| av_vis_semantics
| storage_semantics
);
1979 if (other_semantics
)
1980 vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics
);
1982 vtn_fail_if(util_bitcount(order_semantics
) > 1,
1983 "Multiple memory ordering bits specified");

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
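
/* As an illustration of the split: an atomic operation carrying
 * AcquireRelease | WorkgroupMemory semantics comes out as
 *
 *    *before = Release | WorkgroupMemory   (barrier emitted before the op)
 *    *after  = Acquire | WorkgroupMemory   (barrier emitted after the op)
 *
 * so earlier writes to Workgroup memory cannot be reordered past the
 * operation and later reads cannot be reordered above it.
 */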

static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;
   switch (semantics & (SpvMemorySemanticsAcquireMask |
                        SpvMemorySemanticsReleaseMask |
                        SpvMemorySemanticsAcquireReleaseMask |
                        SpvMemorySemanticsSequentiallyConsistentMask)) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      vtn_fail("Multiple memory ordering bits specified");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask))
      modes |= nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform;
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Output memory semantics, the VulkanMemoryModel "
                  "capability must be declared.");
      modes |= nir_var_shader_out;
   }

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
   nir_intrinsic_set_memory_semantics(intrin, nir_semantics);

   nir_intrinsic_set_memory_modes(intrin, modes);
   nir_intrinsic_set_memory_scope(intrin, nir_scope);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
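
/* For example, an OpMemoryBarrier with Acquire | UniformMemory semantics at
 * Device scope (with no Vulkan memory model in play) maps to one
 * scoped_memory_barrier intrinsic with NIR_MEMORY_ACQUIRE semantics, the
 * nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform modes, and
 * NIR_SCOPE_DEVICE.
 */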

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
         case GLSL_TYPE_INTERFACE:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         vtn_push_value_pointer(b, w[2], src_val->sampled_image->image);
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         vtn_push_value_pointer(b, w[2], src_val->pointer);
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.type = sampled_val->pointer->type;
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type = sampled.type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_instr *texture =
      sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;

   p->src = nir_src_for_ssa(&texture->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      /* These operations require a sampler */
      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         vtn_fail("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         gather_offsets = vtn_value(b, w[idx++], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_min_lod);
      }
   }

   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   if (sampled.image && (sampled.image->access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampled.sampler && (sampled.sampler->access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image_type)) {
      case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
      case GLSL_TYPE_INT:   dest_type = nir_type_int;   break;
      case GLSL_TYPE_UINT:  dest_type = nir_type_uint;  break;
      case GLSL_TYPE_BOOL:  dest_type = nir_type_bool;  break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, ret_type->type);
   ssa->def = &instr->dest.ssa;
   vtn_push_ssa(b, w[2], ret_type, ssa);

   nir_builder_instr_insert(&b->nb, &instr->instr);
}

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
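
/* Note the operand order in the compare-exchange case above: SPIR-V encodes
 * Value (w[7]) before Comparator (w[8]), while the NIR comp_swap atomics
 * expect the compare source first and the data source second, hence the
 * swapped word indices.
 */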

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4);
}
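
/* For example, a 2-component coordinate (x, y) gets swizzled to
 * (x, y, y, y); the intrinsic ignores the extra components but always wants
 * a full vec4 source.
 */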

static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
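
/* For example, a vec2 (x, y) becomes (x, y, x, x): components past the end
 * of the source are filled by repeating component 0.
 */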

static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead: {
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      unsigned idx = 6;
      if (operands & SpvImageOperandsSampleMask) {
         image.sample = vtn_ssa_value(b, w[idx])->def;
         idx++;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[idx]);
         idx++;
      }

      /* TODO: Volatile. */

      break;
   }

   case SpvOpImageWrite: {
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      unsigned idx = 5;
      if (operands & SpvImageOperandsSampleMask) {
         image.sample = vtn_ssa_value(b, w[idx])->def;
         idx++;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[idx]);
         idx++;
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   nir_intrinsic_set_access(intrin, image.image->access);

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components = glsl_get_vector_elements(type->type);
      intrin->num_components = nir_intrinsic_infos[op].dest_components;
      if (intrin->num_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        intrin->num_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (intrin->num_components != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      struct vtn_value *val =
         vtn_push_ssa(b, w[2], type, vtn_create_ssa_value(b, type->type));
      val->ssa->def = result;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}

static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:  return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}

static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and AtomicSMax.
       * Right now there are no NIR intrinsics for them.  At this moment
       * Atomic Counter support is only needed for ARB_spirv support, so we
       * only need to support GLSL Atomic Counters, which are uints and
       * don't allow direct storage.
       */
      unreachable("Invalid uniform atomic");
   }
}

static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:  return nir_intrinsic_load_deref;
   case SpvOpAtomicStore: return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}

/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }

   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_uniform) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset. In this case we don't need to,
       * as that info is already stored on the ptr->var->var nir_variable (see
       * vtn_create_variable)
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here, as
          * atomic counter uniforms don't have sources
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
*deref
= vtn_pointer_to_deref(b
, ptr
);
3075 const struct glsl_type
*deref_type
= deref
->type
;
3076 nir_intrinsic_op op
= get_deref_nir_atomic_op(b
, opcode
);
3077 atomic
= nir_intrinsic_instr_create(b
->nb
.shader
, op
);
3078 atomic
->src
[0] = nir_src_for_ssa(&deref
->dest
.ssa
);
3081 case SpvOpAtomicLoad
:
3082 atomic
->num_components
= glsl_get_vector_elements(deref_type
);
3085 case SpvOpAtomicStore
:
3086 atomic
->num_components
= glsl_get_vector_elements(deref_type
);
3087 nir_intrinsic_set_write_mask(atomic
, (1 << atomic
->num_components
) - 1);
3088 atomic
->src
[1] = nir_src_for_ssa(vtn_ssa_value(b
, w
[4])->def
);
3091 case SpvOpAtomicExchange
:
3092 case SpvOpAtomicCompareExchange
:
3093 case SpvOpAtomicCompareExchangeWeak
:
3094 case SpvOpAtomicIIncrement
:
3095 case SpvOpAtomicIDecrement
:
3096 case SpvOpAtomicIAdd
:
3097 case SpvOpAtomicISub
:
3098 case SpvOpAtomicSMin
:
3099 case SpvOpAtomicUMin
:
3100 case SpvOpAtomicSMax
:
3101 case SpvOpAtomicUMax
:
3102 case SpvOpAtomicAnd
:
3104 case SpvOpAtomicXor
:
3105 fill_common_atomic_sources(b
, opcode
, w
, &atomic
->src
[1]);
3109 vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode
);
3113 /* Atomic ordering operations will implicitly apply to the atomic operation
3114 * storage class, so include that too.
3116 semantics
|= vtn_storage_class_to_memory_semantics(ptr
->ptr_type
->storage_class
);
3118 SpvMemorySemanticsMask before_semantics
;
3119 SpvMemorySemanticsMask after_semantics
;
3120 vtn_split_barrier_semantics(b
, semantics
, &before_semantics
, &after_semantics
);
3122 if (before_semantics
)
3123 vtn_emit_memory_barrier(b
, scope
, before_semantics
);
3125 if (opcode
!= SpvOpAtomicStore
) {
3126 struct vtn_type
*type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
3128 nir_ssa_dest_init(&atomic
->instr
, &atomic
->dest
,
3129 glsl_get_vector_elements(type
->type
),
3130 glsl_get_bit_size(type
->type
), NULL
);
3132 struct vtn_ssa_value
*ssa
= rzalloc(b
, struct vtn_ssa_value
);
3133 ssa
->def
= &atomic
->dest
.ssa
;
3134 ssa
->type
= type
->type
;
3135 vtn_push_ssa(b
, w
[2], type
, ssa
);
3138 nir_builder_instr_insert(&b
->nb
, &atomic
->instr
);
3140 if (after_semantics
)
3141 vtn_emit_memory_barrier(b
, scope
, after_semantics
);

static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}

struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
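
/* Each vec built in the loop above gathers component i from every source
 * column, so column j, component i of the source ends up as column i,
 * component j of the result.
 */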

static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   return nir_channel(&b->nb, src, index);
}

static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

static nir_ssa_def *
nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i)
{
   return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size));
}

static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32));
}

static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
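
/* This lowers a variable-index insert into a chain of bcsels.  For a vec3,
 * roughly:
 *
 *    dest = insert(src, val, 0)
 *    dest = bcsel(index == 1, insert(src, val, 1), dest)
 *    dest = bcsel(index == 2, insert(src, val, 2), dest)
 */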

static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
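
/* Shuffle indices address the concatenation of src0 and src1.  With two
 * vec2 sources, for example, indices {3, 0} select src1.y and src0.x; the
 * special index 0xffffffff yields an undefined component, as OpVectorShuffle
 * permits.
 */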

/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
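
/* For example, constructing a vec4 from (vec2 a, float b, float c) emits
 * vec4(a.x, a.y, b, c): each constituent's components are copied in order
 * into consecutive destination slots.
 */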

static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
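
/* For example, extracting with indices {2, 0, 1} from an array of structs
 * whose first member is a vec4 walks to array element 2, then struct member
 * 0, and finally returns component 1 of that vector as a scalar.
 */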

static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                            vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                           vtn_ssa_value(b, w[4])->def,
                                           vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_ssa_value(b, w[3])->def,
                                    vtn_ssa_value(b, w[4])->def,
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa(b, w[2], type, ssa);
}

static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->options->use_scoped_memory_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   if ((semantics & all_memory_semantics) == all_memory_semantics) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      return;
   }

   /* Issue a bunch of more specific barriers */
   uint32_t bits = semantics;
   while (bits) {
      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
      switch (semantic) {
      case SpvMemorySemanticsUniformMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
         break;
      case SpvMemorySemanticsWorkgroupMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
         break;
      case SpvMemorySemanticsAtomicCounterMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
         break;
      case SpvMemorySemanticsImageMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
         break;
      default:
         break;
      }
   }
}
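
/* Without scoped barriers, UniformMemory | ImageMemory semantics at Device
 * scope, for example, decompose into separate memory_barrier_buffer and
 * memory_barrier_image intrinsics, while the full semantics mask collapses
 * into a single memory_barrier.
 */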

static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }

      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      if (execution_scope == SpvScopeWorkgroup)
         vtn_emit_barrier(b, nir_intrinsic_barrier);
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}

static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
3625 stage_for_execution_model(struct vtn_builder
*b
, SpvExecutionModel model
)
3628 case SpvExecutionModelVertex
:
3629 return MESA_SHADER_VERTEX
;
3630 case SpvExecutionModelTessellationControl
:
3631 return MESA_SHADER_TESS_CTRL
;
3632 case SpvExecutionModelTessellationEvaluation
:
3633 return MESA_SHADER_TESS_EVAL
;
3634 case SpvExecutionModelGeometry
:
3635 return MESA_SHADER_GEOMETRY
;
3636 case SpvExecutionModelFragment
:
3637 return MESA_SHADER_FRAGMENT
;
3638 case SpvExecutionModelGLCompute
:
3639 return MESA_SHADER_COMPUTE
;
3640 case SpvExecutionModelKernel
:
3641 return MESA_SHADER_KERNEL
;
3643 vtn_fail("Unsupported execution model: %s (%u)",
3644 spirv_executionmodel_to_string(model
), model
);

#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)

static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}

static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:    lang = "unknown";    break;
      case SpvSourceLanguageESSL:       lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:       lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:   lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP: lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:       lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;
: {
3706 SpvCapability cap
= w
[1];
3708 case SpvCapabilityMatrix
:
3709 case SpvCapabilityShader
:
3710 case SpvCapabilityGeometry
:
3711 case SpvCapabilityGeometryPointSize
:
3712 case SpvCapabilityUniformBufferArrayDynamicIndexing
:
3713 case SpvCapabilitySampledImageArrayDynamicIndexing
:
3714 case SpvCapabilityStorageBufferArrayDynamicIndexing
:
3715 case SpvCapabilityStorageImageArrayDynamicIndexing
:
3716 case SpvCapabilityImageRect
:
3717 case SpvCapabilitySampledRect
:
3718 case SpvCapabilitySampled1D
:
3719 case SpvCapabilityImage1D
:
3720 case SpvCapabilitySampledCubeArray
:
3721 case SpvCapabilityImageCubeArray
:
3722 case SpvCapabilitySampledBuffer
:
3723 case SpvCapabilityImageBuffer
:
3724 case SpvCapabilityImageQuery
:
3725 case SpvCapabilityDerivativeControl
:
3726 case SpvCapabilityInterpolationFunction
:
3727 case SpvCapabilityMultiViewport
:
3728 case SpvCapabilitySampleRateShading
:
3729 case SpvCapabilityClipDistance
:
3730 case SpvCapabilityCullDistance
:
3731 case SpvCapabilityInputAttachment
:
3732 case SpvCapabilityImageGatherExtended
:
3733 case SpvCapabilityStorageImageExtendedFormats
:
3736 case SpvCapabilityLinkage
:
3737 case SpvCapabilityVector16
:
3738 case SpvCapabilityFloat16Buffer
:
3739 case SpvCapabilitySparseResidency
:
3740 vtn_warn("Unsupported SPIR-V capability: %s",
3741 spirv_capability_to_string(cap
));
3744 case SpvCapabilityMinLod
:
3745 spv_check_supported(min_lod
, cap
);
3748 case SpvCapabilityAtomicStorage
:
3749 spv_check_supported(atomic_storage
, cap
);
3752 case SpvCapabilityFloat64
:
3753 spv_check_supported(float64
, cap
);
3755 case SpvCapabilityInt64
:
3756 spv_check_supported(int64
, cap
);
3758 case SpvCapabilityInt16
:
3759 spv_check_supported(int16
, cap
);
3761 case SpvCapabilityInt8
:
3762 spv_check_supported(int8
, cap
);
3765 case SpvCapabilityTransformFeedback
:
3766 spv_check_supported(transform_feedback
, cap
);
3769 case SpvCapabilityGeometryStreams
:
3770 spv_check_supported(geometry_streams
, cap
);
3773 case SpvCapabilityInt64Atomics
:
3774 spv_check_supported(int64_atomics
, cap
);
3777 case SpvCapabilityStorageImageMultisample
:
3778 spv_check_supported(storage_image_ms
, cap
);
3781 case SpvCapabilityAddresses
:
3782 spv_check_supported(address
, cap
);
3785 case SpvCapabilityKernel
:
3786 spv_check_supported(kernel
, cap
);
3789 case SpvCapabilityImageBasic
:
3790 case SpvCapabilityImageReadWrite
:
3791 case SpvCapabilityImageMipmap
:
3792 case SpvCapabilityPipes
:
3793 case SpvCapabilityDeviceEnqueue
:
3794 case SpvCapabilityLiteralSampler
:
3795 case SpvCapabilityGenericPointer
:
3796 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
3797 spirv_capability_to_string(cap
));
3800 case SpvCapabilityImageMSArray
:
3801 spv_check_supported(image_ms_array
, cap
);
3804 case SpvCapabilityTessellation
:
3805 case SpvCapabilityTessellationPointSize
:
3806 spv_check_supported(tessellation
, cap
);
3809 case SpvCapabilityDrawParameters
:
3810 spv_check_supported(draw_parameters
, cap
);
3813 case SpvCapabilityStorageImageReadWithoutFormat
:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddressesEXT:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }
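
   /*
    * Note: each spv_check_supported(name, cap) above consults the driver's
    * capability bits; roughly, it complains through the vtn_log machinery
    * when b->options->caps.<name> is not set. The exact macro is defined
    * earlier in this file; this sketch is only a reminder of its intent.
    */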

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_32bit_global;
         b->options->global_addr_format = nir_address_format_32bit_global;
         b->options->temp_addr_format = nir_address_format_32bit_global;
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_64bit_global;
         b->options->global_addr_format = nir_address_format_64bit_global;
         b->options->temp_addr_format = nir_address_format_64bit_global;
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
                     "AddressingModelLogical only supported for shaders");
         b->shader->info.cs.ptr_size = 0;
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64EXT:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64EXT not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;
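
   /*
    * Note: under the physical addressing models above, pointers are plain
    * integers (32- or 64-bit global addresses), which is why shared, global,
    * and temp memory all get a nir_address_format assigned up front. Under
    * AddressingModelLogical, pointers have no bit-level representation at
    * all, so ptr_size stays 0.
    */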

   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
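
/*
 * SPIR-V modules have a fixed logical layout: capabilities, extensions,
 * imports, the memory model, entry points, execution modes, debug names,
 * and decorations all precede types, constants, and function bodies. That
 * is why the preamble walker above can simply stop (return false) at the
 * first opcode it does not recognize as preamble.
 */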

static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHint:
   case SpvExecutionModeLocalSizeHintId:
      break; /* Nothing to do with this */
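
   /*
    * Note: a LocalSize recorded here can still be overridden later. When the
    * module decorates a constant as the WorkgroupSize builtin, spirv_to_nir()
    * (below) copies that constant over info.cs.local_size once constants
    * have been parsed.
    */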

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ:
      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}

static void
vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                                           const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   unsigned execution_mode = 0;

   switch(mode->exec_mode) {
   case SpvExecutionModeDenormPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeDenormFlushToZero:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeSignedZeroInfNanPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTE:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTZ:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   default:
      break;
   }

   b->shader->info.float_controls_execution_mode |= execution_mode;
}

static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode in types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
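
/*
 * Second pass: the walker above eagerly materializes every type, constant,
 * and module-level variable, so that the body pass further down can resolve
 * any operand id through b->values.
 */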

static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}
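
/*
 * vtn_nir_select() recurses down composite types: aggregates are selected
 * element by element with the same condition, and the scalar/vector leaves
 * each become a single nir_bcsel. Selecting a struct { vec4; float }, for
 * example, emits two bcsels, one per member.
 */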

static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_ssa_value *ssa = vtn_nir_select(b,
      vtn_ssa_value(b, w[3]), vtn_ssa_value(b, w[4]), vtn_ssa_value(b, w[5]));

   vtn_push_ssa(b, w[2], res_type, ssa);
}

static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
   struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type =
      vtn_value(b, w[1], vtn_value_type_type)->type;
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_ssa_value(b, w[3])->def,
                                vtn_ssa_value(b, w[4])->def,
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_ssa_value(b, w[3])->def,
                               vtn_ssa_value(b, w[4])->def,
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   struct vtn_ssa_value *ssa_value = vtn_create_ssa_value(b, type);
   ssa_value->def = def;
   vtn_push_ssa(b, w[2], vtn_type, ssa_value);
}
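
/*
 * Worked example (a sketch): for two pointers into an SSBO array of floats,
 * OpPtrDiff first computes the raw address difference with
 * nir_build_addr_isub and then divides by the element size (4 bytes for
 * float), so the result is an element count, matching SPIR-V's definition
 * of OpPtrDiff.
 */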

static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (glsl_type_is_image(image->type->type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image->type->type));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;
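
   /*
    * All of the ALU-style opcodes above funnel into vtn_handle_alu(), which
    * maps each SPIR-V arithmetic, logical, comparison, or derivative opcode
    * onto the corresponding nir_op and emits it through the nir_builder in
    * b->nb.
    */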

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrDiff:
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *res_type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *val = vtn_create_ssa_value(b, res_type->type);
      val->def = &intrin->dest.ssa;

      vtn_push_ssa(b, w[2], res_type, val);
      break;
   }

   case SpvOpReadClockKHR: {
      assert(vtn_constant_uint(b, w[3]) == SpvScopeSubgroup);

      /* Operation supports two result types: uvec2 and uint64_t. The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->type = type;
      val->ssa = vtn_create_ssa_value(b, dest_type);
      val->ssa->def = result;
      break;
   }
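
   /*
    * nir_pack_64_2x32 above fuses the low/high 32-bit halves of the clock
    * value into a single 64-bit scalar, so both result types permitted by
    * SPV_KHR_shader_clock come out of the same uvec2 NIR intrinsic.
    */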

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }
   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* The first GLSLang version bump was actually 1.5 years after #179 was
    * fixed, but this should at least let us shut the workaround off for
    * modern versions of GLSLang.
    */
   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;
 fail:
   ralloc_free(b);
   return NULL;
}
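
/*
 * For reference, the five-word SPIR-V header validated above is laid out as:
 *   words[0]  magic number (0x07230203)
 *   words[1]  version
 *   words[2]  generator id (high 16 bits) and generator version (low 16 bits)
 *   words[3]  bound on value ids
 *   words[4]  schema (must be 0)
 */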

static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_shader_in;
      in_var->data.read_only = true;
      in_var->data.location = i;

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}
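
/*
 * The generated wrapper is roughly:
 *
 *    void __wrapped_foo() {     // hypothetical kernel named foo
 *       param0 = load shader_in[0];   // or copy-in for by-value pointers
 *       ...
 *       foo(param0, ...);
 *    }
 *
 * i.e. kernel arguments become read-only shader inputs, and Function-storage
 * pointer arguments are first copied into function-local memory so the
 * callee may freely write through them.
 */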

nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse rounding mode execution modes. This has to happen earlier than
    * other changes in the execution modes since they can affect, for example,
    * the result of the floating point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }
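
   /*
    * Note: per the Vulkan/SPIR-V rules, a constant decorated as the
    * WorkgroupSize builtin takes precedence over the LocalSize execution
    * mode, which is why the uvec3 constant is copied over
    * info.cs.local_size here.
    */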

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);
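
   /*
    * Functions are emitted lazily: only functions reachable from the entry
    * point get NIR code. Emitting a body can mark new callees as referenced
    * (see vtn_handle_function_call), so the loop above repeats until it
    * makes no more progress.
    */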

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program. These dead variables can be invalid NIR. For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away. In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_constant_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn. Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}