/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

#include <stdio.h>
#include <stdarg.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   longjmp(b->fail_jump, 1);
}
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
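
/* Builds a vtn_ssa_value tree whose leaves are NIR undef values.  A vector
 * or scalar gets a single nir_ssa_undef def; matrices, arrays, and structs
 * recurse with one element per column, array element, or member.
 */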
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
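
/* SPIR-V encodes literal strings as nul-terminated UTF-8 bytes packed
 * little-endian into 32-bit words and zero-padded to a word boundary.  For
 * example, "GLSL.std.450" (12 bytes plus the nul) occupies
 * DIV_ROUND_UP(13, 4) = 4 words.
 */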
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
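
/* Every SPIR-V instruction begins with one word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count, which is why the
 * loop below masks with SpvOpCodeMask and shifts by SpvWordCountShift.  For
 * example, a 3-word OpName (opcode 5) has w[0] == 0x00030005.
 */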
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         vtn_fail("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         vtn_assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      vtn_assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         vtn_fail("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
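
/* Returns a mutable copy of the given struct member's type, copying through
 * any levels of array so that a per-member decoration such as RowMajor on
 * one member of an array of matrices cannot leak into other users of the
 * same, possibly shared, vtn_type.
 */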
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   vtn_assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_assert(member >= 0);

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_matrix ||
                 type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
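
/* Maps a SpvImageFormat to the matching GL internal-format enum.  The
 * values are returned as raw numbers (the GL names are in the comments),
 * presumably so this file does not need to pull in the GL headers.
 */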
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format");
   }
}
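
/* Lays a type out according to std430 rules: a scalar's alignment equals
 * its size, a 3-component vector aligns like a 4-component one, an array
 * stride is the element size rounded up to the element alignment, and each
 * struct member starts at its own alignment.  For example,
 * struct { float f; vec3 v; } gets f at offset 0 and v at offset 16 (vec3
 * aligns to 16), for a total size of 28 and an alignment of 16.
 */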
static struct vtn_type *
vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
                       uint32_t *size_out, uint32_t *align_out)
{
   switch (type->base_type) {
   case vtn_base_type_scalar: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      *size_out = comp_size;
      *align_out = comp_size;
      return type;
   }

   case vtn_base_type_vector: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      assert(type->length > 0 && type->length <= 4);
      unsigned align_comps = type->length == 3 ? 4 : type->length;
      *size_out = comp_size * type->length;
      *align_out = comp_size * align_comps;
      return type;
   }

   case vtn_base_type_matrix:
   case vtn_base_type_array: {
      /* We're going to add an array stride */
      type = vtn_type_copy(b, type);
      uint32_t elem_size, elem_align;
      type->array_element = vtn_type_layout_std430(b, type->array_element,
                                                   &elem_size, &elem_align);
      type->stride = vtn_align_u32(elem_size, elem_align);
      *size_out = type->stride * type->length;
      *align_out = elem_align;
      return type;
   }

   case vtn_base_type_struct: {
      /* We're going to add member offsets */
      type = vtn_type_copy(b, type);
      uint32_t offset = 0;
      uint32_t align = 0;
      for (unsigned i = 0; i < type->length; i++) {
         uint32_t mem_size, mem_align;
         type->members[i] = vtn_type_layout_std430(b, type->members[i],
                                                   &mem_size, &mem_align);
         offset = vtn_align_u32(offset, mem_align);
         type->offsets[i] = offset;
         offset += mem_size;
         align = MAX2(align, mem_align);
      }
      *size_out = offset;
      *align_out = align;
      return type;
   }

   default:
      unreachable("Invalid SPIR-V type for std430");
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if(elems < 2 || elems > 4,
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }

      if (storage_class == SpvStorageClassWorkgroup &&
          b->options->lower_workgroup_access_to_offsets) {
         uint32_t size, align;
         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
                                                   &size, &align);
         val->type->length = size;
         val->type->align = align;
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         vtn_assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
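
/* Builds the zero-filled nir_constant used for OpConstantNull.  rzalloc
 * already zeroes the scalar/vector/matrix storage, so only arrays and
 * structs need their element pointers filled in recursively.
 */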
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      vtn_assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
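
/* In SPIR-V, a constant decorated BuiltIn WorkgroupSize overrides the
 * LocalSize execution mode, so the three components of such a constant are
 * copied straight into the shader's compute local size.
 */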
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values->u32[0] = w[3];
         break;
      case 16:
         val->constant->values->u16[0] = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         break;
      case 32:
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
         break;
      case 16:
         val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
         break;
      default:
         vtn_fail("Unsupported SpvOpSpecConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         int bit_size = glsl_get_bit_size(val->type->type);
         for (unsigned i = 0; i < elem_count; i++) {
            switch (bit_size) {
            case 64:
               val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               break;
            case 32:
               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               break;
            case 16:
               val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
               break;
            default:
               vtn_fail("Invalid SpvOpConstantComposite bit size");
            }
         }
         break;
      }

      case vtn_base_type_matrix:
         assert(glsl_type_is_matrix(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;

      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            /* This is for both 32-bit and 16-bit values */
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
               assert(col == 0 && elem == -1);
               col = w[i];
               elem = 0;
               type = type->array_element;
               break;

            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                  switch (bit_size) {
                  case 64:
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                     break;
                  case 32:
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                     break;
                  case 16:
                     val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeExtract bit size");
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                  switch (bit_size) {
                  case 64:
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                     break;
                  case 32:
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                     break;
                  case 16:
                     (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeInsert bit size");
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     src_alu_type, dst_alu_type);

         nir_const_value src[4];

         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
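
/* NIR call parameters are variable derefs at this point: pointer arguments
 * whose pointee type never got a real glsl_type are passed through as
 * derefs directly, while SSA-able arguments are stored into fresh local
 * temporaries that are passed instead.  A non-void return value likewise
 * comes back through an "out_tmp" local that is loaded after the call.
 */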
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   vtn_assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
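
/* Translates the SPIR-V sampling, fetch, gather, and query opcodes into NIR
 * texture instructions: ImplicitLod/ExplicitLod sampling maps to tex/txl
 * (adjusted to txb or txd when Bias or Grad operands appear), OpImageFetch
 * maps to txf or txf_ms, gathers map to tg4, and the size/LOD/levels/samples
 * queries map to txs, lod, query_levels, and texture_samples.
 */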
1737 vtn_handle_texture(struct vtn_builder
*b
, SpvOp opcode
,
1738 const uint32_t *w
, unsigned count
)
1740 if (opcode
== SpvOpSampledImage
) {
1741 struct vtn_value
*val
=
1742 vtn_push_value(b
, w
[2], vtn_value_type_sampled_image
);
1743 val
->sampled_image
= ralloc(b
, struct vtn_sampled_image
);
1744 val
->sampled_image
->type
=
1745 vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
1746 val
->sampled_image
->image
=
1747 vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
1748 val
->sampled_image
->sampler
=
1749 vtn_value(b
, w
[4], vtn_value_type_pointer
)->pointer
;
1751 } else if (opcode
== SpvOpImage
) {
1752 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_pointer
);
1753 struct vtn_value
*src_val
= vtn_untyped_value(b
, w
[3]);
1754 if (src_val
->value_type
== vtn_value_type_sampled_image
) {
1755 val
->pointer
= src_val
->sampled_image
->image
;
1757 vtn_assert(src_val
->value_type
== vtn_value_type_pointer
);
1758 val
->pointer
= src_val
->pointer
;
1763 struct vtn_type
*ret_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
1764 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1766 struct vtn_sampled_image sampled
;
1767 struct vtn_value
*sampled_val
= vtn_untyped_value(b
, w
[3]);
1768 if (sampled_val
->value_type
== vtn_value_type_sampled_image
) {
1769 sampled
= *sampled_val
->sampled_image
;
1771 vtn_assert(sampled_val
->value_type
== vtn_value_type_pointer
);
1772 sampled
.type
= sampled_val
->pointer
->type
;
1773 sampled
.image
= NULL
;
1774 sampled
.sampler
= sampled_val
->pointer
;
1777 const struct glsl_type
*image_type
= sampled
.type
->type
;
1778 const enum glsl_sampler_dim sampler_dim
= glsl_get_sampler_dim(image_type
);
1779 const bool is_array
= glsl_sampler_type_is_array(image_type
);
1780 const bool is_shadow
= glsl_sampler_type_is_shadow(image_type
);
1782 /* Figure out the base texture operation */
1785 case SpvOpImageSampleImplicitLod
:
1786 case SpvOpImageSampleDrefImplicitLod
:
1787 case SpvOpImageSampleProjImplicitLod
:
1788 case SpvOpImageSampleProjDrefImplicitLod
:
1789 texop
= nir_texop_tex
;
1792 case SpvOpImageSampleExplicitLod
:
1793 case SpvOpImageSampleDrefExplicitLod
:
1794 case SpvOpImageSampleProjExplicitLod
:
1795 case SpvOpImageSampleProjDrefExplicitLod
:
1796 texop
= nir_texop_txl
;
1799 case SpvOpImageFetch
:
1800 if (glsl_get_sampler_dim(image_type
) == GLSL_SAMPLER_DIM_MS
) {
1801 texop
= nir_texop_txf_ms
;
1803 texop
= nir_texop_txf
;
1807 case SpvOpImageGather
:
1808 case SpvOpImageDrefGather
:
1809 texop
= nir_texop_tg4
;
1812 case SpvOpImageQuerySizeLod
:
1813 case SpvOpImageQuerySize
:
1814 texop
= nir_texop_txs
;
1817 case SpvOpImageQueryLod
:
1818 texop
= nir_texop_lod
;
1821 case SpvOpImageQueryLevels
:
1822 texop
= nir_texop_query_levels
;
1825 case SpvOpImageQuerySamples
:
1826 texop
= nir_texop_texture_samples
;
1830 vtn_fail("Unhandled opcode");
1833 nir_tex_src srcs
[8]; /* 8 should be enough */
1834 nir_tex_src
*p
= srcs
;
1838 struct nir_ssa_def
*coord
;
1839 unsigned coord_components
;
1841 case SpvOpImageSampleImplicitLod
:
1842 case SpvOpImageSampleExplicitLod
:
1843 case SpvOpImageSampleDrefImplicitLod
:
1844 case SpvOpImageSampleDrefExplicitLod
:
1845 case SpvOpImageSampleProjImplicitLod
:
1846 case SpvOpImageSampleProjExplicitLod
:
1847 case SpvOpImageSampleProjDrefImplicitLod
:
1848 case SpvOpImageSampleProjDrefExplicitLod
:
1849 case SpvOpImageFetch
:
1850 case SpvOpImageGather
:
1851 case SpvOpImageDrefGather
:
1852 case SpvOpImageQueryLod
: {
1853 /* All these types have the coordinate as their first real argument */
1854 switch (sampler_dim
) {
1855 case GLSL_SAMPLER_DIM_1D
:
1856 case GLSL_SAMPLER_DIM_BUF
:
1857 coord_components
= 1;
1859 case GLSL_SAMPLER_DIM_2D
:
1860 case GLSL_SAMPLER_DIM_RECT
:
1861 case GLSL_SAMPLER_DIM_MS
:
1862 coord_components
= 2;
1864 case GLSL_SAMPLER_DIM_3D
:
1865 case GLSL_SAMPLER_DIM_CUBE
:
1866 coord_components
= 3;
1869 vtn_fail("Invalid sampler type");
1872 if (is_array
&& texop
!= nir_texop_lod
)
1875 coord
= vtn_ssa_value(b
, w
[idx
++])->def
;
1876 p
->src
= nir_src_for_ssa(nir_channels(&b
->nb
, coord
,
1877 (1 << coord_components
) - 1));
1878 p
->src_type
= nir_tex_src_coord
;
1885 coord_components
= 0;
1890 case SpvOpImageSampleProjImplicitLod
:
1891 case SpvOpImageSampleProjExplicitLod
:
1892 case SpvOpImageSampleProjDrefImplicitLod
:
1893 case SpvOpImageSampleProjDrefExplicitLod
:
1894 /* These have the projector as the last coordinate component */
1895 p
->src
= nir_src_for_ssa(nir_channel(&b
->nb
, coord
, coord_components
));
1896 p
->src_type
= nir_tex_src_projector
;
1904 unsigned gather_component
= 0;
1906 case SpvOpImageSampleDrefImplicitLod
:
1907 case SpvOpImageSampleDrefExplicitLod
:
1908 case SpvOpImageSampleProjDrefImplicitLod
:
1909 case SpvOpImageSampleProjDrefExplicitLod
:
1910 case SpvOpImageDrefGather
:
1911 /* These all have an explicit depth value as their next source */
1912 (*p
++) = vtn_tex_src(b
, w
[idx
++], nir_tex_src_comparator
);
1915 case SpvOpImageGather
:
1916 /* This has a component as its next source */
1918 vtn_value(b
, w
[idx
++], vtn_value_type_constant
)->constant
->values
[0].u32
[0];
1925 /* For OpImageQuerySizeLod, we always have an LOD */
1926 if (opcode
== SpvOpImageQuerySizeLod
)
1927 (*p
++) = vtn_tex_src(b
, w
[idx
++], nir_tex_src_lod
);
1929 /* Now we need to handle some number of optional arguments */
1930 const struct vtn_ssa_value
*gather_offsets
= NULL
;
1932 uint32_t operands
= w
[idx
++];
1934 if (operands
& SpvImageOperandsBiasMask
) {
1935 vtn_assert(texop
== nir_texop_tex
);
1936 texop
= nir_texop_txb
;
1937 (*p
++) = vtn_tex_src(b
, w
[idx
++], nir_tex_src_bias
);
1940 if (operands
& SpvImageOperandsLodMask
) {
      vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                 texop == nir_texop_txs);
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
   }

   if (operands & SpvImageOperandsGradMask) {
      vtn_assert(texop == nir_texop_txl);
      texop = nir_texop_txd;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
   }

   if (operands & SpvImageOperandsOffsetMask ||
       operands & SpvImageOperandsConstOffsetMask)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

   if (operands & SpvImageOperandsConstOffsetsMask) {
      gather_offsets = vtn_ssa_value(b, w[idx++]);
      (*p++) = (nir_tex_src){};
   }

   if (operands & SpvImageOperandsSampleMask) {
      vtn_assert(texop == nir_texop_txf);
      texop = nir_texop_txf_ms;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
   }

   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      vtn_fail("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      vtn_assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
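
/* Illustrative note (not taken from any particular SPIR-V binary): a gather
 * such as
 *
 *    %r = OpImageGather %v4f %sampledImage %coord %comp ConstOffsets %offs
 *
 * goes down the gather_offsets path above and is expanded into four tg4
 * instructions, one per entry of the offset array, with the final vec4
 * assembled from the .w lane of each gather's result.
 */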

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }
}
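
/* Quick reference for the mapping above:
 *
 *    OpAtomicIIncrement -> add of an immediate  1 (no data operand in SPIR-V)
 *    OpAtomicIDecrement -> add of an immediate -1 (no data operand in SPIR-V)
 *    OpAtomicISub       -> add of the negated value operand
 *    OpAtomicCompareExchange: w[8] is Comparator and w[7] is Value, so the
 *    NIR comp-swap sources are swapped relative to SPIR-V operand order.
 */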

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
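
/* For example (hypothetical values), a 2D coordinate (x, y) becomes
 * (x, y, y, y): components past the source dimension repeat the last valid
 * one, and the image_load_store intrinsics simply ignore the extras.
 */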

static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
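
/* Note that, unlike the SSBO and shared tables below, SMin/UMin (and
 * SMax/UMax) both map to a single image atomic_min/atomic_max intrinsic
 * here; presumably the image format carries the signedness, so there are
 * no separate imin/umin opcodes at the image_load_store level.
 */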

static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid SSBO atomic");
   }
}

static nir_intrinsic_op
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_shared;
   case SpvOpAtomicStore:     return nir_intrinsic_store_shared;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid shared atomic");
   }
}

static nir_intrinsic_op
get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid variable atomic");
   }
}
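
/* The three lookup tables above differ only in the intrinsic namespace they
 * return (ssbo_*, shared_*, var_*); which one applies is decided in
 * vtn_handle_ssbo_or_shared_atomic() below, based on the pointer mode and
 * on whether workgroup access is being lowered to offsets.
 */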

static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }

   /*
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (ptr->mode == vtn_variable_mode_workgroup &&
       !b->options->lower_workgroup_access_to_offsets) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   } else {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op;
      if (ptr->mode == vtn_variable_mode_ssbo) {
         op = get_ssbo_nir_atomic_op(b, opcode);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
                    b->options->lower_workgroup_access_to_offsets);
         op = get_shared_nir_atomic_op(b, opcode);
      }

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
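
/* Source layout summary for the offset-based path above (a sketch):
 *
 *    load:  src[0] = index (SSBO only), then offset
 *    store: data first, then index (SSBO only), then offset
 *    r-m-w: index (SSBO only), offset, then the data operand(s)
 *           appended by fill_common_atomic_sources()
 */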

static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: vtn_fail("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
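
/* The single-component case uses fmov because NIR has no vec1 ALU op; a
 * move with one source is the usual way to build a "vector" of one
 * component.
 */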

struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
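
/* Example (hypothetical): transposing a matrix made of three vec2 columns
 * produces one made of two vec3 columns; new column i gathers component i
 * of every source column via create_vec().  The back-pointer stored in
 * dest->transposed means transposing twice returns the original value
 * without emitting any code.
 */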

nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}

nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}

nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
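
/* Both dynamic helpers lower a variable index into a chain of bcsel
 * instructions.  For a vec4 extract this is roughly (illustrative NIR):
 *
 *    d = src.x
 *    d = bcsel(index == 1, src.y, d)
 *    d = bcsel(index == 2, src.z, d)
 *    d = bcsel(index == 3, src.w, d)
 *
 * Out-of-range indices therefore fall back to component 0 rather than
 * producing undefined behaviour.
 */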

static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
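
/* Example (hypothetical): OpCompositeConstruct of a vec4 from a vec2 and
 * two floats yields srcs = {ab, c, d} and emits
 * vec4(ab.x, ab.y, c.x, d.x), with dest_idx walking across the output
 * components as each source is consumed.
 */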

static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */

      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */

         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
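
/* Example (hypothetical): extracting with indices {2, 1} from an array of
 * vec4s walks into element 2 on the first iteration and then, because a
 * vec4 is vector-typed, peels off component 1 with vtn_vector_extract()
 * and returns a scalar.
 */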

static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      vtn_fail("unknown composite operation");
   }
}

static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      vtn_fail("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type");
   }
}

static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode");
   }
}
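
/* These counts are fixed by the geometry-stage input primitive: points
 * carry 1 vertex, lines 2, lines with adjacency 4, triangles 3, and
 * triangles with adjacency 6.
 */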

static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      vtn_fail("Unsupported execution model");
   }
}

#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s",      \
                  spirv_capability_to_string(cap));         \
   } while(0)
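
/* Typical use, as in the capability handling below:
 *
 *    case SpvCapabilityFloat64:
 *       spv_check_supported(float64, cap);
 *       break;
 *
 * i.e. `name` must match a boolean member of b->options->caps.
 */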

static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      default:
         vtn_fail("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      vtn_assert(w[1] == SpvAddressingModelLogical);
      vtn_assert(w[2] == SpvMemoryModelSimple ||
                 w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(b, w[1]) != b->entry_point_stage)
         break;

      vtn_assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      vtn_fail("Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      vtn_fail("Unhandled execution mode");
   }
}

static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect: {
      /* Handle OpSelect up-front here because it needs to be able to handle
       * pointers and not just regular vectors and scalars.
       */
      struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
      struct vtn_value *sel_val = vtn_untyped_value(b, w[3]);
      struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
      struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

      const struct glsl_type *sel_type;
      switch (res_val->type->base_type) {
      case vtn_base_type_scalar:
         sel_type = glsl_bool_type();
         break;
      case vtn_base_type_vector:
         sel_type = glsl_vector_type(GLSL_TYPE_BOOL, res_val->type->length);
         break;
      case vtn_base_type_pointer:
         /* We need to have actual storage for pointer types */
         vtn_fail_if(res_val->type->type == NULL,
                     "Invalid pointer result type for OpSelect");
         sel_type = glsl_bool_type();
         break;
      default:
         vtn_fail("Result type of OpSelect must be a scalar, vector, or pointer");
      }

      if (unlikely(sel_val->type->type != sel_type)) {
         if (sel_val->type->type == glsl_bool_type()) {
            /* This case is illegal but some older versions of GLSLang produce
             * it.  The GLSLang issue was fixed on March 30, 2017:
             *
             * https://github.com/KhronosGroup/glslang/issues/809
             *
             * Unfortunately, there are applications in the wild which are
             * shipping with this bug so it isn't nice to fail on them so we
             * throw a warning instead.  It's not actually a problem for us as
             * nir_builder will just splat the condition out which is most
             * likely what the client wanted anyway.
             */
            vtn_warn("Condition type of OpSelect must have the same number "
                     "of components as Result Type");
         } else {
            vtn_fail("Condition type of OpSelect must be a scalar or vector "
                     "of Boolean type. It must have the same number of "
                     "components as Result Type");
         }
      }

      vtn_fail_if(obj1_val->type != res_val->type ||
                  obj2_val->type != res_val->type,
                  "Object types must match the result type in OpSelect");

      struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
      ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
                                   vtn_ssa_value(b, w[4])->def,
                                   vtn_ssa_value(b, w[5])->def);
      vtn_push_ssa(b, w[2], res_type, ssa);
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   return true;
}

nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->spirv = words;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = options;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   vtn_assert(word_count > 5);

   vtn_assert(words[0] == SpvMagicNumber);
   vtn_assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   vtn_assert(words[4] == 0);

   words += 5;

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                                     _mesa_key_pointer_equal);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);