/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
28 #include "vtn_private.h"
29 #include "nir/nir_vla.h"
30 #include "nir/nir_control_flow.h"
31 #include "nir/nir_constant_expressions.h"
32 #include "spirv_info.h"
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
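/* Note: the debug callback above receives the caller's private_data pointer,
 * the severity level, the byte offset of the current instruction within the
 * SPIR-V binary, and the fully formatted message, so a consumer can route
 * translator diagnostics into its own logging without touching this file.
 */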
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   longjmp(b->fail_jump, 1);
}
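/* Error handling in this file is setjmp/longjmp based: _vtn_fail() logs the
 * error and then unwinds to the setjmp() on b->fail_jump at the top of the
 * parse, so individual opcode handlers never need to thread error returns
 * back up the call stack.
 */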
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
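/* SPIR-V packs string literals as UTF-8 bytes, four per 32-bit word, with a
 * guaranteed NUL terminator padding out the final word.  For example, the
 * five bytes of "main" plus its terminator occupy two words, so *words_used
 * comes back as 2.
 */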
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}
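/* Each SPIR-V instruction begins with a word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count, which is why the
 * loop above can walk the binary with just SpvOpCodeMask and
 * SpvWordCountShift and no per-opcode size table.
 */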
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         vtn_fail("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }
}
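/* GLSL.std.450 is the only extended instruction set handled here; importing
 * any other set is a hard failure rather than something silently ignored.
 */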
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         vtn_assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      vtn_assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         vtn_fail("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
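/* vtn_type objects are freely shared between values, so decorations that
 * mutate a type (RowMajor, MatrixStride, ...) go through shallow
 * copy-on-write via vtn_type_copy() first.  That keeps a decoration applied
 * to one struct member from leaking into every other user of the same type.
 */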
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   vtn_assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_assert(member >= 0);

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}
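/* Note the swap in the row-major case above: the decorated MatrixStride
 * lands on the column-vector array element while the matrix takes over the
 * element's old stride, which is how the rest of vtn distinguishes the two
 * layouts when computing offsets later on.
 */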
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_matrix ||
                 type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format");
   }
}
static struct vtn_type *
vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
                       uint32_t *size_out, uint32_t *align_out)
{
   switch (type->base_type) {
   case vtn_base_type_scalar: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      *size_out = comp_size;
      *align_out = comp_size;
      return type;
   }

   case vtn_base_type_vector: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      assert(type->length > 0 && type->length <= 4);
      unsigned align_comps = type->length == 3 ? 4 : type->length;
      *size_out = comp_size * type->length;
      *align_out = comp_size * align_comps;
      return type;
   }

   case vtn_base_type_matrix:
   case vtn_base_type_array: {
      /* We're going to add an array stride */
      type = vtn_type_copy(b, type);
      uint32_t elem_size, elem_align;
      type->array_element = vtn_type_layout_std430(b, type->array_element,
                                                   &elem_size, &elem_align);
      type->stride = vtn_align_u32(elem_size, elem_align);
      *size_out = type->stride * type->length;
      *align_out = elem_align;
      return type;
   }

   case vtn_base_type_struct: {
      /* We're going to add member offsets */
      type = vtn_type_copy(b, type);
      uint32_t offset = 0;
      uint32_t align = 0;
      for (unsigned i = 0; i < type->length; i++) {
         uint32_t mem_size, mem_align;
         type->members[i] = vtn_type_layout_std430(b, type->members[i],
                                                   &mem_size, &mem_align);
         offset = vtn_align_u32(offset, mem_align);
         type->offsets[i] = offset;
         offset += mem_size;
         align = MAX2(align, mem_align);
      }
      *size_out = offset;
      *align_out = align;
      return type;
   }

   default:
      unreachable("Invalid SPIR-V type for std430");
   }
}
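/* std430 example as computed above: in struct { float f; vec3 v; }, f gets
 * offset 0 with size/align 4, while v (size 12, but a 3-component vector
 * aligns like a 4-component one, i.e. 16) lands at offset 16, for a total
 * size of 28 and alignment of 16 before any array-stride rounding.
 */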
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_assert(glsl_type_is_scalar(base->type));
      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }
   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_assert(glsl_type_is_vector(base->type));
      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }
   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }
   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }
   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }

      if (storage_class == SpvStorageClassWorkgroup &&
          b->options->lower_workgroup_access_to_offsets) {
         uint32_t size, align;
         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
                                                   &size, &align);
         val->type->length = size;
         val->type->align = align;
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();
      }
      break;
   }
   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      vtn_assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         vtn_assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }
   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      vtn_assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}
static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
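/* Specialization is driven by decoration: a spec constant carries a SpecId,
 * and spec_constant_decoration_cb() matches that id against the
 * caller-supplied b->specializations array, substituting the 32- or 64-bit
 * payload provided at pipeline-creation time for the module's default value.
 */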
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      vtn_assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      vtn_assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      vtn_assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         vtn_assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            vtn_assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            vtn_assert(glsl_type_is_vector(val->const_type));
            vtn_assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  vtn_assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }
      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Unsupported type for constants");
      }
      break;
   }
   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }
      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  vtn_assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  vtn_assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               vtn_fail("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     vtn_assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     vtn_assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }
      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     src_alu_type,
                                                     dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         vtn_assert(count <= 7);
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            vtn_assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   vtn_assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;
      }
      return;
   }
   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.type = sampled_val->pointer->type;
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type = sampled.type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      vtn_fail("Unhandled opcode");
   }
   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         vtn_fail("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }
   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }
   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }

   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
   case GLSL_TYPE_INT:   instr->dest_type = nir_type_int;   break;
   case GLSL_TYPE_UINT:  instr->dest_type = nir_type_uint;  break;
   case GLSL_TYPE_BOOL:  instr->dest_type = nir_type_bool;  break;
   default:
      vtn_fail("Invalid base type for sampler result");
   }
   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));
   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      vtn_assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }
}
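
/* Expands a SPIR-V image coordinate to the vec4 expected by the NIR
 * image_load_store intrinsics, replicating the last valid component into
 * the padding channels.
 */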
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
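
/* Translates the SPIR-V image opcodes (texel pointers, reads, writes, size
 * queries, and image atomics) into NIR image intrinsics.
 */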
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
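
/* The next three helpers map a SPIR-V atomic opcode onto the NIR intrinsic
 * for the storage being accessed: SSBOs, workgroup memory lowered to
 * offsets, and ordinary variable dereferences.
 */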
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid SSBO atomic");
   }
}

static nir_intrinsic_op
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_shared;
   case SpvOpAtomicStore:     return nir_intrinsic_store_shared;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid shared atomic");
   }
}

static nir_intrinsic_op
get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid var atomic");
   }
}
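
/* Handles atomics whose pointer operand lives in an SSBO or in workgroup
 * storage, emitting either a variable-deref atomic or an index/offset-based
 * one depending on how workgroup access is lowered.
 */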
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }

   /*
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (ptr->mode == vtn_variable_mode_workgroup &&
       !b->options->lower_workgroup_access_to_offsets) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   } else {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op;
      if (ptr->mode == vtn_variable_mode_ssbo) {
         op = get_ssbo_nir_atomic_op(b, opcode);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
                    b->options->lower_workgroup_access_to_offsets);
         op = get_shared_nir_atomic_op(b, opcode);
      }

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
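
/* Creates the ALU instruction (fmov for one component, vecN otherwise)
 * used below to build vectors up component by component.
 */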
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: vtn_fail("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
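
/* Builds the transpose of src one column at a time, reusing a previously
 * built transpose if the value already has one cached.
 */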
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}

static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}

static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
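
/* The dynamic-index variants below lower a run-time index into a chain of
 * nir_bcsel instructions, one comparison per vector component.
 */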
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}

static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}

static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
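
/* Recursively duplicates a vtn_ssa_value; vectors and scalars share the
 * underlying nir_ssa_def while aggregates get fresh element arrays.
 */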
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
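
/* Dispatches the SPIR-V composite opcodes (dynamic vector extract/insert,
 * shuffle, construct, extract/insert, and object copies) to the helpers
 * above.
 */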
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      vtn_fail("unknown composite operation");
   }
}
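
/* EmitVertex/EndPrimitive and the barrier opcodes all map to simple NIR
 * intrinsics; only the stream variants carry an extra stream-id index.
 */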
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      vtn_fail("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type");
   }
}

static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode");
   }
}

static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      vtn_fail("Unsupported execution model");
   }
}

#define spv_check_supported(name, cap) do {             \
      if (!(b->options && b->options->caps.name))       \
         vtn_warn("Unsupported SPIR-V capability: %s",  \
                  spirv_capability_to_string(cap));     \
   } while(0)
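
/* First pass over the binary: handles the debug, capability, extension,
 * entry-point, and decoration instructions that precede any types,
 * variables, or code.  Returns false at the first opcode that belongs to
 * a later section.
 */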
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         break;

      default:
         vtn_fail("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      vtn_assert(w[1] == SpvAddressingModelLogical);
      vtn_assert(w[2] == SpvMemoryModelSimple ||
                 w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(b, w[1]) != b->entry_point_stage)
         break;

      vtn_assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      vtn_fail("Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      vtn_fail("Unhandled execution mode");
   }
}

static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }
:
3347 case SpvOpAtomicExchange
:
3348 case SpvOpAtomicCompareExchange
:
3349 case SpvOpAtomicCompareExchangeWeak
:
3350 case SpvOpAtomicIIncrement
:
3351 case SpvOpAtomicIDecrement
:
3352 case SpvOpAtomicIAdd
:
3353 case SpvOpAtomicISub
:
3354 case SpvOpAtomicSMin
:
3355 case SpvOpAtomicUMin
:
3356 case SpvOpAtomicSMax
:
3357 case SpvOpAtomicUMax
:
3358 case SpvOpAtomicAnd
:
3360 case SpvOpAtomicXor
: {
3361 struct vtn_value
*pointer
= vtn_untyped_value(b
, w
[3]);
3362 if (pointer
->value_type
== vtn_value_type_image_pointer
) {
3363 vtn_handle_image(b
, opcode
, w
, count
);
3365 vtn_assert(pointer
->value_type
== vtn_value_type_pointer
);
3366 vtn_handle_ssbo_or_shared_atomic(b
, opcode
, w
, count
);
3371 case SpvOpAtomicStore
: {
3372 struct vtn_value
*pointer
= vtn_untyped_value(b
, w
[1]);
3373 if (pointer
->value_type
== vtn_value_type_image_pointer
) {
3374 vtn_handle_image(b
, opcode
, w
, count
);
3376 vtn_assert(pointer
->value_type
== vtn_value_type_pointer
);
3377 vtn_handle_ssbo_or_shared_atomic(b
, opcode
, w
, count
);
3383 /* Handle OpSelect up-front here because it needs to be able to handle
3384 * pointers and not just regular vectors and scalars.
3386 struct vtn_type
*res_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
3387 struct vtn_ssa_value
*ssa
= vtn_create_ssa_value(b
, res_type
->type
);
3388 ssa
->def
= nir_bcsel(&b
->nb
, vtn_ssa_value(b
, w
[3])->def
,
3389 vtn_ssa_value(b
, w
[4])->def
,
3390 vtn_ssa_value(b
, w
[5])->def
);
3391 vtn_push_ssa(b
, w
[2], res_type
, ssa
);
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   return true;
}
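
/* Top-level entry point.  Validates the SPIR-V header, then walks the word
 * stream in passes: preamble, execution modes, types/variables/constants,
 * CFG construction, and finally the referenced function bodies.
 */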
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->spirv = words;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = options;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 4 dwords) */
   vtn_assert(word_count > 5);

   vtn_assert(words[0] == SpvMagicNumber);
   vtn_assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   vtn_assert(words[4] == 0);

   words += 5;

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                                     _mesa_key_pointer_equal);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   ralloc_free(b);

   return entry_point;
}