/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

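/* This file implements the core of the SPIR-V front end: it walks a SPIR-V
 * binary and builds the corresponding NIR shader.  The general pattern is a
 * set of vtn_handle_* functions, one per group of SPIR-V instructions,
 * driven by vtn_foreach_instruction().  Errors are reported through
 * vtn_fail(), which logs a message and then longjmp()s out of the walk.
 */
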
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}

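/* Note that vtn_fail() does not return: after logging (and optionally
 * dumping the binary when MESA_SPIRV_FAIL_DUMP_PATH is set in the
 * environment), it longjmp()s to b->fail_jump, which the top-level entry
 * point is expected to have armed with setjmp() before parsing begins,
 * along the lines of:
 *
 *    if (setjmp(b->fail_jump)) {
 *       ralloc_free(b);
 *       return NULL;   // parsing failed somewhere below
 *    }
 */
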
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   return val;
}

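/* Returns the vtn_ssa_value for any value that can be used as an SSA
 * operand: undefs and constants are materialized on demand, pointers are
 * converted to their SSA representation, and plain SSA values are returned
 * as-is.
 */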
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer: {
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;
   }

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}

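/* Each SPIR-V instruction starts with one word that packs the total word
 * count into the high 16 bits and the opcode into the low 16 bits, so an
 * instruction encoded in 4 words begins with roughly
 * (4 << SpvWordCountShift) | opcode.  The loop below walks the stream
 * using that count and tracks OpLine/OpNoLine so error messages can point
 * back at the original source location.
 */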
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         vtn_fail("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

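/* Like vtn_foreach_decoration(), but visits only execution modes.  Both
 * kinds live on the same decoration list with distinct scopes: the helper
 * above skips VTN_DEC_EXECUTION_MODE entries, and this iterator keeps only
 * them.
 */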
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

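/* Context threaded through the struct-member decoration callbacks below.
 * It carries the glsl_struct_field array being built for an OpTypeStruct
 * and the vtn_type whose members the decorations apply to.
 */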
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

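/* Applies a single member decoration to the struct field being built.
 * Interpolation qualifiers land in the glsl_struct_field, layout
 * information (Offset, RowMajor, BuiltIn) lands in the vtn_type, and
 * anything that cannot occur on a struct member in Vulkan is warned about
 * and ignored.
 */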
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_matrix ||
                 type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}

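/* Translates a SPIR-V image format onto the GL internal-format enum stored
 * in vtn_type::image_format.  The hex values below are the GL enums spelled
 * out so this file does not need to pull in the GL headers.
 */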
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format");
   }
}

static struct vtn_type *
vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
                       uint32_t *size_out, uint32_t *align_out)
{
   switch (type->base_type) {
   case vtn_base_type_scalar: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      *size_out = comp_size;
      *align_out = comp_size;
      return type;
   }

   case vtn_base_type_vector: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      assert(type->length > 0 && type->length <= 4);
      unsigned align_comps = type->length == 3 ? 4 : type->length;
      *size_out = comp_size * type->length;
      *align_out = comp_size * align_comps;
      return type;
   }

   case vtn_base_type_matrix:
   case vtn_base_type_array: {
      /* We're going to add an array stride */
      type = vtn_type_copy(b, type);
      uint32_t elem_size, elem_align;
      type->array_element = vtn_type_layout_std430(b, type->array_element,
                                                   &elem_size, &elem_align);
      type->stride = vtn_align_u32(elem_size, elem_align);
      *size_out = type->stride * type->length;
      *align_out = elem_align;
      return type;
   }

   case vtn_base_type_struct: {
      /* We're going to add member offsets */
      type = vtn_type_copy(b, type);
      uint32_t offset = 0;
      uint32_t align = 0;
      for (unsigned i = 0; i < type->length; i++) {
         uint32_t mem_size, mem_align;
         type->members[i] = vtn_type_layout_std430(b, type->members[i],
                                                   &mem_size, &mem_align);
         offset = vtn_align_u32(offset, mem_align);
         type->offsets[i] = offset;
         offset += mem_size;
         align = MAX2(align, mem_align);
      }
      *size_out = offset;
      *align_out = align;
      return type;
   }

   default:
      unreachable("Invalid SPIR-V type for std430");
   }
}

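/* As a worked example of the std430 rules above: a vec3 of 32-bit floats
 * has comp_size = 4 and length = 3, so it reports size 12 but alignment 16
 * (align_comps rounds 3 up to 4), and an array of such vec3s therefore
 * gets stride = vtn_align_u32(12, 16) = 16.
 */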
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if(elems < 2 || elems > 4,
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }

      if (storage_class == SpvStorageClassWorkgroup &&
          b->options->lower_workgroup_access_to_offsets) {
         uint32_t size, align;
         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
                                                   &size, &align);
         val->type->length = size;
         val->type->align = align;
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         vtn_assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

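/* Builds a zero-initialized nir_constant matching the given type; used to
 * implement OpConstantNull.  Null array elements are shared: every element
 * points at the same null constant, so only one is actually allocated.
 */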
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      vtn_assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}

static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}

static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}

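/* Constant handling.  Spec constants go through get_specialization(), which
 * walks the value's decorations looking for SpecId and, when the client
 * supplied a matching entry in b->specializations, substitutes the client's
 * data for the default encoded in the module.  So, for example, an
 * OpSpecConstant with SpecId 7 and default 4 yields 4 unless a
 * specialization with id == 7 overrides it.
 */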
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values->u32[0] = w[3];
         break;
      case 16:
         val->constant->values->u16[0] = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         break;
      case 32:
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
         break;
      case 16:
         val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
         break;
      default:
         vtn_fail("Unsupported SpvOpSpecConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         int bit_size = glsl_get_bit_size(val->type->type);
         for (unsigned i = 0; i < elem_count; i++) {
            switch (bit_size) {
            case 64:
               val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               break;
            case 32:
               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               break;
            case 16:
               val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
               break;
            default:
               vtn_fail("Invalid SpvOpConstantComposite bit size");
            }
         }
         break;
      }

      case vtn_base_type_matrix:
         assert(glsl_type_is_matrix(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;

      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            /* This is for both 32-bit and 16-bit values */
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
               assert(col == 0 && elem == -1);
               col = w[i];
               elem = 0;
               type = type->array_element;
               break;

            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++) {
                  switch (bit_size) {
                  case 64:
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                     break;
                  case 32:
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                     break;
                  case 16:
                     val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeExtract bit size");
                  }
               }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++) {
                  switch (bit_size) {
                  case 64:
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                     break;
                  case 32:
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                     break;
                  case 16:
                     (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeInsert bit size");
                  }
               }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     src_alu_type,
                                                     dst_alu_type);
         nir_const_value src[4];

         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      } /* default */
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   vtn_assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}

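/* Recursively allocates the vtn_ssa_value tree for a type.  Vectors and
 * scalars get a single def (filled in by the caller), while matrices,
 * arrays, and structs get one element per column, array element, or field
 * respectively.
 */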
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

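/* Texture and sampler handling.  OpSampledImage and OpImage just shuffle
 * image/sampler pointers around; everything else maps a SPIR-V image
 * instruction onto a nir_tex_instr by picking a base texop, gathering the
 * coordinate and any optional image operands as nir_tex_srcs, and then
 * emitting the instruction (or four of them for ConstOffsets gathers).
 */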
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.type = sampled_val->pointer->type;
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type = sampled.type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         vtn_fail("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      vtn_fail("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      vtn_assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}

2145 fill_common_atomic_sources(struct vtn_builder
*b
, SpvOp opcode
,
2146 const uint32_t *w
, nir_src
*src
)
2149 case SpvOpAtomicIIncrement
:
2150 src
[0] = nir_src_for_ssa(nir_imm_int(&b
->nb
, 1));
2153 case SpvOpAtomicIDecrement
:
2154 src
[0] = nir_src_for_ssa(nir_imm_int(&b
->nb
, -1));
2157 case SpvOpAtomicISub
:
2159 nir_src_for_ssa(nir_ineg(&b
->nb
, vtn_ssa_value(b
, w
[6])->def
));
2162 case SpvOpAtomicCompareExchange
:
2163 src
[0] = nir_src_for_ssa(vtn_ssa_value(b
, w
[8])->def
);
2164 src
[1] = nir_src_for_ssa(vtn_ssa_value(b
, w
[7])->def
);
2167 case SpvOpAtomicExchange
:
2168 case SpvOpAtomicIAdd
:
2169 case SpvOpAtomicSMin
:
2170 case SpvOpAtomicUMin
:
2171 case SpvOpAtomicSMax
:
2172 case SpvOpAtomicUMax
:
2173 case SpvOpAtomicAnd
:
2175 case SpvOpAtomicXor
:
2176 src
[0] = nir_src_for_ssa(vtn_ssa_value(b
, w
[6])->def
);
2180 vtn_fail("Invalid SPIR-V atomic");
2184 static nir_ssa_def
*
2185 get_image_coord(struct vtn_builder
*b
, uint32_t value
)
2187 struct vtn_ssa_value
*coord
= vtn_ssa_value(b
, value
);
2189 /* The image_load_store intrinsics assume a 4-dim coordinate */
2190 unsigned dim
= glsl_get_vector_elements(coord
->type
);
2191 unsigned swizzle
[4];
2192 for (unsigned i
= 0; i
< 4; i
++)
2193 swizzle
[i
] = MIN2(i
, dim
- 1);
2195 return nir_swizzle(&b
->nb
, coord
->def
, swizzle
, 4, false);
2199 vtn_handle_image(struct vtn_builder
*b
, SpvOp opcode
,
2200 const uint32_t *w
, unsigned count
)
2202 /* Just get this one out of the way */
2203 if (opcode
== SpvOpImageTexelPointer
) {
2204 struct vtn_value
*val
=
2205 vtn_push_value(b
, w
[2], vtn_value_type_image_pointer
);
2206 val
->image
= ralloc(b
, struct vtn_image_pointer
);
2208 val
->image
->image
= vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2209 val
->image
->coord
= get_image_coord(b
, w
[4]);
2210 val
->image
->sample
= vtn_ssa_value(b
, w
[5])->def
;
2214 struct vtn_image_pointer image
;
2217 case SpvOpAtomicExchange
:
2218 case SpvOpAtomicCompareExchange
:
2219 case SpvOpAtomicCompareExchangeWeak
:
2220 case SpvOpAtomicIIncrement
:
2221 case SpvOpAtomicIDecrement
:
2222 case SpvOpAtomicIAdd
:
2223 case SpvOpAtomicISub
:
2224 case SpvOpAtomicLoad
:
2225 case SpvOpAtomicSMin
:
2226 case SpvOpAtomicUMin
:
2227 case SpvOpAtomicSMax
:
2228 case SpvOpAtomicUMax
:
2229 case SpvOpAtomicAnd
:
2231 case SpvOpAtomicXor
:
2232 image
= *vtn_value(b
, w
[3], vtn_value_type_image_pointer
)->image
;
2235 case SpvOpAtomicStore
:
2236 image
= *vtn_value(b
, w
[1], vtn_value_type_image_pointer
)->image
;
2239 case SpvOpImageQuerySize
:
2240 image
.image
= vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2242 image
.sample
= NULL
;
2245 case SpvOpImageRead
:
2246 image
.image
= vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2247 image
.coord
= get_image_coord(b
, w
[4]);
2249 if (count
> 5 && (w
[5] & SpvImageOperandsSampleMask
)) {
2250 vtn_assert(w
[5] == SpvImageOperandsSampleMask
);
2251 image
.sample
= vtn_ssa_value(b
, w
[6])->def
;
2253 image
.sample
= nir_ssa_undef(&b
->nb
, 1, 32);
2257 case SpvOpImageWrite
:
2258 image
.image
= vtn_value(b
, w
[1], vtn_value_type_pointer
)->pointer
;
2259 image
.coord
= get_image_coord(b
, w
[2]);
2263 if (count
> 4 && (w
[4] & SpvImageOperandsSampleMask
)) {
2264 vtn_assert(w
[4] == SpvImageOperandsSampleMask
);
2265 image
.sample
= vtn_ssa_value(b
, w
[5])->def
;
2267 image
.sample
= nir_ssa_undef(&b
->nb
, 1, 32);
2272 vtn_fail("Invalid image opcode");
2275 nir_intrinsic_op op
;
2277 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
2278 OP(ImageQuerySize
, size
)
2280 OP(ImageWrite
, store
)
2281 OP(AtomicLoad
, load
)
2282 OP(AtomicStore
, store
)
2283 OP(AtomicExchange
, atomic_exchange
)
2284 OP(AtomicCompareExchange
, atomic_comp_swap
)
2285 OP(AtomicIIncrement
, atomic_add
)
2286 OP(AtomicIDecrement
, atomic_add
)
2287 OP(AtomicIAdd
, atomic_add
)
2288 OP(AtomicISub
, atomic_add
)
2289 OP(AtomicSMin
, atomic_min
)
2290 OP(AtomicUMin
, atomic_min
)
2291 OP(AtomicSMax
, atomic_max
)
2292 OP(AtomicUMax
, atomic_max
)
2293 OP(AtomicAnd
, atomic_and
)
2294 OP(AtomicOr
, atomic_or
)
2295 OP(AtomicXor
, atomic_xor
)
2298 vtn_fail("Invalid image opcode");
2301 nir_intrinsic_instr
*intrin
= nir_intrinsic_instr_create(b
->shader
, op
);
2303 nir_deref_var
*image_deref
= vtn_pointer_to_deref(b
, image
.image
);
2304 intrin
->variables
[0] = nir_deref_var_clone(image_deref
, intrin
);
2306 /* ImageQuerySize doesn't take any extra parameters */
2307 if (opcode
!= SpvOpImageQuerySize
) {
2308 /* The image coordinate is always 4 components but we may not have that
2309 * many. Swizzle to compensate.
2312 for (unsigned i
= 0; i
< 4; i
++)
2313 swiz
[i
] = i
< image
.coord
->num_components
? i
: 0;
2314 intrin
->src
[0] = nir_src_for_ssa(nir_swizzle(&b
->nb
, image
.coord
,
2316 intrin
->src
[1] = nir_src_for_ssa(image
.sample
);
2320 case SpvOpAtomicLoad
:
2321 case SpvOpImageQuerySize
:
2322 case SpvOpImageRead
:
2324 case SpvOpAtomicStore
:
2325 intrin
->src
[2] = nir_src_for_ssa(vtn_ssa_value(b
, w
[4])->def
);
2327 case SpvOpImageWrite
:
2328 intrin
->src
[2] = nir_src_for_ssa(vtn_ssa_value(b
, w
[3])->def
);
2331 case SpvOpAtomicCompareExchange
:
2332 case SpvOpAtomicIIncrement
:
2333 case SpvOpAtomicIDecrement
:
2334 case SpvOpAtomicExchange
:
2335 case SpvOpAtomicIAdd
:
2336 case SpvOpAtomicISub
:
2337 case SpvOpAtomicSMin
:
2338 case SpvOpAtomicUMin
:
2339 case SpvOpAtomicSMax
:
2340 case SpvOpAtomicUMax
:
2341 case SpvOpAtomicAnd
:
2343 case SpvOpAtomicXor
:
2344 fill_common_atomic_sources(b
, opcode
, w
, &intrin
->src
[2]);
2348 vtn_fail("Invalid image opcode");
2351 if (opcode
!= SpvOpImageWrite
) {
2352 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
2353 struct vtn_type
*type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
2355 unsigned dest_components
=
2356 nir_intrinsic_infos
[intrin
->intrinsic
].dest_components
;
2357 if (intrin
->intrinsic
== nir_intrinsic_image_size
) {
2358 dest_components
= intrin
->num_components
=
2359 glsl_get_vector_elements(type
->type
);
2362 nir_ssa_dest_init(&intrin
->instr
, &intrin
->dest
,
2363 dest_components
, 32, NULL
);
2365 nir_builder_instr_insert(&b
->nb
, &intrin
->instr
);
2367 val
->ssa
= vtn_create_ssa_value(b
, type
->type
);
2368 val
->ssa
->def
= &intrin
->dest
.ssa
;
2370 nir_builder_instr_insert(&b
->nb
, &intrin
->instr
);
2374 static nir_intrinsic_op
2375 get_ssbo_nir_atomic_op(struct vtn_builder
*b
, SpvOp opcode
)
2378 case SpvOpAtomicLoad
: return nir_intrinsic_load_ssbo
;
2379 case SpvOpAtomicStore
: return nir_intrinsic_store_ssbo
;
2380 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
2381 OP(AtomicExchange
, atomic_exchange
)
2382 OP(AtomicCompareExchange
, atomic_comp_swap
)
2383 OP(AtomicIIncrement
, atomic_add
)
2384 OP(AtomicIDecrement
, atomic_add
)
2385 OP(AtomicIAdd
, atomic_add
)
2386 OP(AtomicISub
, atomic_add
)
2387 OP(AtomicSMin
, atomic_imin
)
2388 OP(AtomicUMin
, atomic_umin
)
2389 OP(AtomicSMax
, atomic_imax
)
2390 OP(AtomicUMax
, atomic_umax
)
2391 OP(AtomicAnd
, atomic_and
)
2392 OP(AtomicOr
, atomic_or
)
2393 OP(AtomicXor
, atomic_xor
)
2396 vtn_fail("Invalid SSBO atomic");
2400 static nir_intrinsic_op
2401 get_shared_nir_atomic_op(struct vtn_builder
*b
, SpvOp opcode
)
2404 case SpvOpAtomicLoad
: return nir_intrinsic_load_shared
;
2405 case SpvOpAtomicStore
: return nir_intrinsic_store_shared
;
2406 #define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
2407 OP(AtomicExchange
, atomic_exchange
)
2408 OP(AtomicCompareExchange
, atomic_comp_swap
)
2409 OP(AtomicIIncrement
, atomic_add
)
2410 OP(AtomicIDecrement
, atomic_add
)
2411 OP(AtomicIAdd
, atomic_add
)
2412 OP(AtomicISub
, atomic_add
)
2413 OP(AtomicSMin
, atomic_imin
)
2414 OP(AtomicUMin
, atomic_umin
)
2415 OP(AtomicSMax
, atomic_imax
)
2416 OP(AtomicUMax
, atomic_umax
)
2417 OP(AtomicAnd
, atomic_and
)
2418 OP(AtomicOr
, atomic_or
)
2419 OP(AtomicXor
, atomic_xor
)
2422 vtn_fail("Invalid shared atomic");
2426 static nir_intrinsic_op
2427 get_var_nir_atomic_op(struct vtn_builder
*b
, SpvOp opcode
)
2430 case SpvOpAtomicLoad
: return nir_intrinsic_load_var
;
2431 case SpvOpAtomicStore
: return nir_intrinsic_store_var
;
2432 #define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
2433 OP(AtomicExchange
, atomic_exchange
)
2434 OP(AtomicCompareExchange
, atomic_comp_swap
)
2435 OP(AtomicIIncrement
, atomic_add
)
2436 OP(AtomicIDecrement
, atomic_add
)
2437 OP(AtomicIAdd
, atomic_add
)
2438 OP(AtomicISub
, atomic_add
)
2439 OP(AtomicSMin
, atomic_imin
)
2440 OP(AtomicUMin
, atomic_umin
)
2441 OP(AtomicSMax
, atomic_imax
)
2442 OP(AtomicUMax
, atomic_umax
)
2443 OP(AtomicAnd
, atomic_and
)
2444 OP(AtomicOr
, atomic_or
)
2445 OP(AtomicXor
, atomic_xor
)
2448 vtn_fail("Invalid shared atomic");
2453 vtn_handle_ssbo_or_shared_atomic(struct vtn_builder
*b
, SpvOp opcode
,
2454 const uint32_t *w
, unsigned count
)
2456 struct vtn_pointer
*ptr
;
2457 nir_intrinsic_instr
*atomic
;
2460 case SpvOpAtomicLoad
:
2461 case SpvOpAtomicExchange
:
2462 case SpvOpAtomicCompareExchange
:
2463 case SpvOpAtomicCompareExchangeWeak
:
2464 case SpvOpAtomicIIncrement
:
2465 case SpvOpAtomicIDecrement
:
2466 case SpvOpAtomicIAdd
:
2467 case SpvOpAtomicISub
:
2468 case SpvOpAtomicSMin
:
2469 case SpvOpAtomicUMin
:
2470 case SpvOpAtomicSMax
:
2471 case SpvOpAtomicUMax
:
2472 case SpvOpAtomicAnd
:
2474 case SpvOpAtomicXor
:
2475 ptr
= vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
2478 case SpvOpAtomicStore
:
2479 ptr
= vtn_value(b
, w
[1], vtn_value_type_pointer
)->pointer
;
2483 vtn_fail("Invalid SPIR-V atomic");
2487 SpvScope scope = w[4];
2488 SpvMemorySemanticsMask semantics = w[5];
2491 if (ptr
->mode
== vtn_variable_mode_workgroup
&&
2492 !b
->options
->lower_workgroup_access_to_offsets
) {
2493 nir_deref_var
*deref
= vtn_pointer_to_deref(b
, ptr
);
2494 const struct glsl_type
*deref_type
= nir_deref_tail(&deref
->deref
)->type
;
2495 nir_intrinsic_op op
= get_var_nir_atomic_op(b
, opcode
);
2496 atomic
= nir_intrinsic_instr_create(b
->nb
.shader
, op
);
2497 atomic
->variables
[0] = nir_deref_var_clone(deref
, atomic
);
2500 case SpvOpAtomicLoad
:
2501 atomic
->num_components
= glsl_get_vector_elements(deref_type
);
2504 case SpvOpAtomicStore
:
2505 atomic
->num_components
= glsl_get_vector_elements(deref_type
);
2506 nir_intrinsic_set_write_mask(atomic
, (1 << atomic
->num_components
) - 1);
2507 atomic
->src
[0] = nir_src_for_ssa(vtn_ssa_value(b
, w
[4])->def
);
2510 case SpvOpAtomicExchange
:
2511 case SpvOpAtomicCompareExchange
:
2512 case SpvOpAtomicCompareExchangeWeak
:
2513 case SpvOpAtomicIIncrement
:
2514 case SpvOpAtomicIDecrement
:
2515 case SpvOpAtomicIAdd
:
2516 case SpvOpAtomicISub
:
2517 case SpvOpAtomicSMin
:
2518 case SpvOpAtomicUMin
:
2519 case SpvOpAtomicSMax
:
2520 case SpvOpAtomicUMax
:
2521 case SpvOpAtomicAnd
:
2523 case SpvOpAtomicXor
:
2524 fill_common_atomic_sources(b
, opcode
, w
, &atomic
->src
[0]);
2528 vtn_fail("Invalid SPIR-V atomic");
2532 nir_ssa_def
*offset
, *index
;
2533 offset
= vtn_pointer_to_offset(b
, ptr
, &index
, NULL
);
2535 nir_intrinsic_op op
;
2536 if (ptr
->mode
== vtn_variable_mode_ssbo
) {
2537 op
= get_ssbo_nir_atomic_op(b
, opcode
);
2539 vtn_assert(ptr
->mode
== vtn_variable_mode_workgroup
&&
2540 b
->options
->lower_workgroup_access_to_offsets
);
2541 op
= get_shared_nir_atomic_op(b
, opcode
);
2544 atomic
= nir_intrinsic_instr_create(b
->nb
.shader
, op
);
2548 case SpvOpAtomicLoad
:
2549 atomic
->num_components
= glsl_get_vector_elements(ptr
->type
->type
);
2550 if (ptr
->mode
== vtn_variable_mode_ssbo
)
2551 atomic
->src
[src
++] = nir_src_for_ssa(index
);
2552 atomic
->src
[src
++] = nir_src_for_ssa(offset
);
2555 case SpvOpAtomicStore
:
2556 atomic
->num_components
= glsl_get_vector_elements(ptr
->type
->type
);
2557 nir_intrinsic_set_write_mask(atomic
, (1 << atomic
->num_components
) - 1);
2558 atomic
->src
[src
++] = nir_src_for_ssa(vtn_ssa_value(b
, w
[4])->def
);
2559 if (ptr
->mode
== vtn_variable_mode_ssbo
)
2560 atomic
->src
[src
++] = nir_src_for_ssa(index
);
2561 atomic
->src
[src
++] = nir_src_for_ssa(offset
);
2564 case SpvOpAtomicExchange
:
2565 case SpvOpAtomicCompareExchange
:
2566 case SpvOpAtomicCompareExchangeWeak
:
2567 case SpvOpAtomicIIncrement
:
2568 case SpvOpAtomicIDecrement
:
2569 case SpvOpAtomicIAdd
:
2570 case SpvOpAtomicISub
:
2571 case SpvOpAtomicSMin
:
2572 case SpvOpAtomicUMin
:
2573 case SpvOpAtomicSMax
:
2574 case SpvOpAtomicUMax
:
2575 case SpvOpAtomicAnd
:
2577 case SpvOpAtomicXor
:
2578 if (ptr
->mode
== vtn_variable_mode_ssbo
)
2579 atomic
->src
[src
++] = nir_src_for_ssa(index
);
2580 atomic
->src
[src
++] = nir_src_for_ssa(offset
);
2581 fill_common_atomic_sources(b
, opcode
, w
, &atomic
->src
[src
]);
2585 vtn_fail("Invalid SPIR-V atomic");
2589 if (opcode
!= SpvOpAtomicStore
) {
2590 struct vtn_type
*type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
2592 nir_ssa_dest_init(&atomic
->instr
, &atomic
->dest
,
2593 glsl_get_vector_elements(type
->type
),
2594 glsl_get_bit_size(type
->type
), NULL
);
2596 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
2597 val
->ssa
= rzalloc(b
, struct vtn_ssa_value
);
2598 val
->ssa
->def
= &atomic
->dest
.ssa
;
2599 val
->ssa
->type
= type
->type
;
2602 nir_builder_instr_insert(&b
->nb
, &atomic
->instr
);
2605 static nir_alu_instr
*
2606 create_vec(struct vtn_builder
*b
, unsigned num_components
, unsigned bit_size
)
2609 switch (num_components
) {
2610 case 1: op
= nir_op_fmov
; break;
2611 case 2: op
= nir_op_vec2
; break;
2612 case 3: op
= nir_op_vec3
; break;
2613 case 4: op
= nir_op_vec4
; break;
2614 default: vtn_fail("bad vector size");
2617 nir_alu_instr
*vec
= nir_alu_instr_create(b
->shader
, op
);
2618 nir_ssa_dest_init(&vec
->instr
, &vec
->dest
.dest
, num_components
,
2620 vec
->dest
.write_mask
= (1 << num_components
) - 1;
2625 struct vtn_ssa_value
*
2626 vtn_ssa_transpose(struct vtn_builder
*b
, struct vtn_ssa_value
*src
)
2628 if (src
->transposed
)
2629 return src
->transposed
;
2631 struct vtn_ssa_value
*dest
=
2632 vtn_create_ssa_value(b
, glsl_transposed_type(src
->type
));
2634 for (unsigned i
= 0; i
< glsl_get_matrix_columns(dest
->type
); i
++) {
2635 nir_alu_instr
*vec
= create_vec(b
, glsl_get_matrix_columns(src
->type
),
2636 glsl_get_bit_size(src
->type
));
2637 if (glsl_type_is_vector_or_scalar(src
->type
)) {
2638 vec
->src
[0].src
= nir_src_for_ssa(src
->def
);
2639 vec
->src
[0].swizzle
[0] = i
;
2641 for (unsigned j
= 0; j
< glsl_get_matrix_columns(src
->type
); j
++) {
2642 vec
->src
[j
].src
= nir_src_for_ssa(src
->elems
[j
]->def
);
2643 vec
->src
[j
].swizzle
[0] = i
;
2646 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
2647 dest
->elems
[i
]->def
= &vec
->dest
.dest
.ssa
;
2650 dest
->transposed
= src
;
2656 vtn_vector_extract(struct vtn_builder
*b
, nir_ssa_def
*src
, unsigned index
)
2658 unsigned swiz
[4] = { index
};
2659 return nir_swizzle(&b
->nb
, src
, swiz
, 1, true);
2663 vtn_vector_insert(struct vtn_builder
*b
, nir_ssa_def
*src
, nir_ssa_def
*insert
,
2666 nir_alu_instr
*vec
= create_vec(b
, src
->num_components
,
2669 for (unsigned i
= 0; i
< src
->num_components
; i
++) {
2671 vec
->src
[i
].src
= nir_src_for_ssa(insert
);
2673 vec
->src
[i
].src
= nir_src_for_ssa(src
);
2674 vec
->src
[i
].swizzle
[0] = i
;
2678 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
2680 return &vec
->dest
.dest
.ssa
;
2684 vtn_vector_extract_dynamic(struct vtn_builder
*b
, nir_ssa_def
*src
,
2687 nir_ssa_def
*dest
= vtn_vector_extract(b
, src
, 0);
2688 for (unsigned i
= 1; i
< src
->num_components
; i
++)
2689 dest
= nir_bcsel(&b
->nb
, nir_ieq(&b
->nb
, index
, nir_imm_int(&b
->nb
, i
)),
2690 vtn_vector_extract(b
, src
, i
), dest
);
2696 vtn_vector_insert_dynamic(struct vtn_builder
*b
, nir_ssa_def
*src
,
2697 nir_ssa_def
*insert
, nir_ssa_def
*index
)
2699 nir_ssa_def
*dest
= vtn_vector_insert(b
, src
, insert
, 0);
2700 for (unsigned i
= 1; i
< src
->num_components
; i
++)
2701 dest
= nir_bcsel(&b
->nb
, nir_ieq(&b
->nb
, index
, nir_imm_int(&b
->nb
, i
)),
2702 vtn_vector_insert(b
, src
, insert
, i
), dest
);
2707 static nir_ssa_def
*
2708 vtn_vector_shuffle(struct vtn_builder
*b
, unsigned num_components
,
2709 nir_ssa_def
*src0
, nir_ssa_def
*src1
,
2710 const uint32_t *indices
)
2712 nir_alu_instr
*vec
= create_vec(b
, num_components
, src0
->bit_size
);
2714 for (unsigned i
= 0; i
< num_components
; i
++) {
2715 uint32_t index
= indices
[i
];
2716 if (index
== 0xffffffff) {
2718 nir_src_for_ssa(nir_ssa_undef(&b
->nb
, 1, src0
->bit_size
));
2719 } else if (index
< src0
->num_components
) {
2720 vec
->src
[i
].src
= nir_src_for_ssa(src0
);
2721 vec
->src
[i
].swizzle
[0] = index
;
2723 vec
->src
[i
].src
= nir_src_for_ssa(src1
);
2724 vec
->src
[i
].swizzle
[0] = index
- src0
->num_components
;
2728 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
2730 return &vec
->dest
.dest
.ssa
;
2734 * Concatentates a number of vectors/scalars together to produce a vector
2736 static nir_ssa_def
*
2737 vtn_vector_construct(struct vtn_builder
*b
, unsigned num_components
,
2738 unsigned num_srcs
, nir_ssa_def
**srcs
)
2740 nir_alu_instr
*vec
= create_vec(b
, num_components
, srcs
[0]->bit_size
);
2742 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2744 * "When constructing a vector, there must be at least two Constituent
2747 vtn_assert(num_srcs
>= 2);
2749 unsigned dest_idx
= 0;
2750 for (unsigned i
= 0; i
< num_srcs
; i
++) {
2751 nir_ssa_def
*src
= srcs
[i
];
2752 vtn_assert(dest_idx
+ src
->num_components
<= num_components
);
2753 for (unsigned j
= 0; j
< src
->num_components
; j
++) {
2754 vec
->src
[dest_idx
].src
= nir_src_for_ssa(src
);
2755 vec
->src
[dest_idx
].swizzle
[0] = j
;
2760 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2762 * "When constructing a vector, the total number of components in all
2763 * the operands must equal the number of components in Result Type."
2765 vtn_assert(dest_idx
== num_components
);
2767 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
2769 return &vec
->dest
.dest
.ssa
;
2772 static struct vtn_ssa_value
*
2773 vtn_composite_copy(void *mem_ctx
, struct vtn_ssa_value
*src
)
2775 struct vtn_ssa_value
*dest
= rzalloc(mem_ctx
, struct vtn_ssa_value
);
2776 dest
->type
= src
->type
;
2778 if (glsl_type_is_vector_or_scalar(src
->type
)) {
2779 dest
->def
= src
->def
;
2781 unsigned elems
= glsl_get_length(src
->type
);
2783 dest
->elems
= ralloc_array(mem_ctx
, struct vtn_ssa_value
*, elems
);
2784 for (unsigned i
= 0; i
< elems
; i
++)
2785 dest
->elems
[i
] = vtn_composite_copy(mem_ctx
, src
->elems
[i
]);
2791 static struct vtn_ssa_value
*
2792 vtn_composite_insert(struct vtn_builder
*b
, struct vtn_ssa_value
*src
,
2793 struct vtn_ssa_value
*insert
, const uint32_t *indices
,
2794 unsigned num_indices
)
2796 struct vtn_ssa_value
*dest
= vtn_composite_copy(b
, src
);
2798 struct vtn_ssa_value
*cur
= dest
;
2800 for (i
= 0; i
< num_indices
- 1; i
++) {
2801 cur
= cur
->elems
[indices
[i
]];
2804 if (glsl_type_is_vector_or_scalar(cur
->type
)) {
2805 /* According to the SPIR-V spec, OpCompositeInsert may work down to
2806 * the component granularity. In that case, the last index will be
2807 * the index to insert the scalar into the vector.
2810 cur
->def
= vtn_vector_insert(b
, cur
->def
, insert
->def
, indices
[i
]);
2812 cur
->elems
[indices
[i
]] = insert
;
2818 static struct vtn_ssa_value
*
2819 vtn_composite_extract(struct vtn_builder
*b
, struct vtn_ssa_value
*src
,
2820 const uint32_t *indices
, unsigned num_indices
)
2822 struct vtn_ssa_value
*cur
= src
;
2823 for (unsigned i
= 0; i
< num_indices
; i
++) {
2824 if (glsl_type_is_vector_or_scalar(cur
->type
)) {
2825 vtn_assert(i
== num_indices
- 1);
2826 /* According to the SPIR-V spec, OpCompositeExtract may work down to
2827 * the component granularity. The last index will be the index of the
2828 * vector to extract.
2831 struct vtn_ssa_value
*ret
= rzalloc(b
, struct vtn_ssa_value
);
2832 ret
->type
= glsl_scalar_type(glsl_get_base_type(cur
->type
));
2833 ret
->def
= vtn_vector_extract(b
, cur
->def
, indices
[i
]);
2836 cur
= cur
->elems
[indices
[i
]];
2844 vtn_handle_composite(struct vtn_builder
*b
, SpvOp opcode
,
2845 const uint32_t *w
, unsigned count
)
2847 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
2848 const struct glsl_type
*type
=
2849 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
2850 val
->ssa
= vtn_create_ssa_value(b
, type
);
2853 case SpvOpVectorExtractDynamic
:
2854 val
->ssa
->def
= vtn_vector_extract_dynamic(b
, vtn_ssa_value(b
, w
[3])->def
,
2855 vtn_ssa_value(b
, w
[4])->def
);
2858 case SpvOpVectorInsertDynamic
:
2859 val
->ssa
->def
= vtn_vector_insert_dynamic(b
, vtn_ssa_value(b
, w
[3])->def
,
2860 vtn_ssa_value(b
, w
[4])->def
,
2861 vtn_ssa_value(b
, w
[5])->def
);
2864 case SpvOpVectorShuffle
:
2865 val
->ssa
->def
= vtn_vector_shuffle(b
, glsl_get_vector_elements(type
),
2866 vtn_ssa_value(b
, w
[3])->def
,
2867 vtn_ssa_value(b
, w
[4])->def
,
2871 case SpvOpCompositeConstruct
: {
2872 unsigned elems
= count
- 3;
2873 if (glsl_type_is_vector_or_scalar(type
)) {
2874 nir_ssa_def
*srcs
[4];
2875 for (unsigned i
= 0; i
< elems
; i
++)
2876 srcs
[i
] = vtn_ssa_value(b
, w
[3 + i
])->def
;
2878 vtn_vector_construct(b
, glsl_get_vector_elements(type
),
2881 val
->ssa
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
2882 for (unsigned i
= 0; i
< elems
; i
++)
2883 val
->ssa
->elems
[i
] = vtn_ssa_value(b
, w
[3 + i
]);
2887 case SpvOpCompositeExtract
:
2888 val
->ssa
= vtn_composite_extract(b
, vtn_ssa_value(b
, w
[3]),
2892 case SpvOpCompositeInsert
:
2893 val
->ssa
= vtn_composite_insert(b
, vtn_ssa_value(b
, w
[4]),
2894 vtn_ssa_value(b
, w
[3]),
2898 case SpvOpCopyObject
:
2899 val
->ssa
= vtn_composite_copy(b
, vtn_ssa_value(b
, w
[3]));
2903 vtn_fail("unknown composite operation");
2908 vtn_handle_barrier(struct vtn_builder
*b
, SpvOp opcode
,
2909 const uint32_t *w
, unsigned count
)
2911 nir_intrinsic_op intrinsic_op
;
2913 case SpvOpEmitVertex
:
2914 case SpvOpEmitStreamVertex
:
2915 intrinsic_op
= nir_intrinsic_emit_vertex
;
2917 case SpvOpEndPrimitive
:
2918 case SpvOpEndStreamPrimitive
:
2919 intrinsic_op
= nir_intrinsic_end_primitive
;
2921 case SpvOpMemoryBarrier
:
2922 intrinsic_op
= nir_intrinsic_memory_barrier
;
2924 case SpvOpControlBarrier
:
2925 intrinsic_op
= nir_intrinsic_barrier
;
2928 vtn_fail("unknown barrier instruction");
2931 nir_intrinsic_instr
*intrin
=
2932 nir_intrinsic_instr_create(b
->shader
, intrinsic_op
);
2934 if (opcode
== SpvOpEmitStreamVertex
|| opcode
== SpvOpEndStreamPrimitive
)
2935 nir_intrinsic_set_stream_id(intrin
, w
[1]);
2937 nir_builder_instr_insert(&b
->nb
, &intrin
->instr
);
2941 gl_primitive_from_spv_execution_mode(struct vtn_builder
*b
,
2942 SpvExecutionMode mode
)
2945 case SpvExecutionModeInputPoints
:
2946 case SpvExecutionModeOutputPoints
:
2947 return 0; /* GL_POINTS */
2948 case SpvExecutionModeInputLines
:
2949 return 1; /* GL_LINES */
2950 case SpvExecutionModeInputLinesAdjacency
:
2951 return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
2952 case SpvExecutionModeTriangles
:
2953 return 4; /* GL_TRIANGLES */
2954 case SpvExecutionModeInputTrianglesAdjacency
:
2955 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
2956 case SpvExecutionModeQuads
:
2957 return 7; /* GL_QUADS */
2958 case SpvExecutionModeIsolines
:
2959 return 0x8E7A; /* GL_ISOLINES */
2960 case SpvExecutionModeOutputLineStrip
:
2961 return 3; /* GL_LINE_STRIP */
2962 case SpvExecutionModeOutputTriangleStrip
:
2963 return 5; /* GL_TRIANGLE_STRIP */
2965 vtn_fail("Invalid primitive type");
2970 vertices_in_from_spv_execution_mode(struct vtn_builder
*b
,
2971 SpvExecutionMode mode
)
2974 case SpvExecutionModeInputPoints
:
2976 case SpvExecutionModeInputLines
:
2978 case SpvExecutionModeInputLinesAdjacency
:
2980 case SpvExecutionModeTriangles
:
2982 case SpvExecutionModeInputTrianglesAdjacency
:
2985 vtn_fail("Invalid GS input mode");
2989 static gl_shader_stage
2990 stage_for_execution_model(struct vtn_builder
*b
, SpvExecutionModel model
)
2993 case SpvExecutionModelVertex
:
2994 return MESA_SHADER_VERTEX
;
2995 case SpvExecutionModelTessellationControl
:
2996 return MESA_SHADER_TESS_CTRL
;
2997 case SpvExecutionModelTessellationEvaluation
:
2998 return MESA_SHADER_TESS_EVAL
;
2999 case SpvExecutionModelGeometry
:
3000 return MESA_SHADER_GEOMETRY
;
3001 case SpvExecutionModelFragment
:
3002 return MESA_SHADER_FRAGMENT
;
3003 case SpvExecutionModelGLCompute
:
3004 return MESA_SHADER_COMPUTE
;
3006 vtn_fail("Unsupported execution model");
3010 #define spv_check_supported(name, cap) do { \
3011 if (!(b->options && b->options->caps.name)) \
3012 vtn_warn("Unsupported SPIR-V capability: %s", \
3013 spirv_capability_to_string(cap)); \
3017 vtn_handle_preamble_instruction(struct vtn_builder
*b
, SpvOp opcode
,
3018 const uint32_t *w
, unsigned count
)
3025 case SpvSourceLanguageUnknown
: lang
= "unknown"; break;
3026 case SpvSourceLanguageESSL
: lang
= "ESSL"; break;
3027 case SpvSourceLanguageGLSL
: lang
= "GLSL"; break;
3028 case SpvSourceLanguageOpenCL_C
: lang
= "OpenCL C"; break;
3029 case SpvSourceLanguageOpenCL_CPP
: lang
= "OpenCL C++"; break;
3030 case SpvSourceLanguageHLSL
: lang
= "HLSL"; break;
3033 uint32_t version
= w
[2];
3036 (count
> 3) ? vtn_value(b
, w
[3], vtn_value_type_string
)->str
: "";
3038 vtn_info("Parsing SPIR-V from %s %u source file %s", lang
, version
, file
);
3042 case SpvOpSourceExtension
:
3043 case SpvOpSourceContinued
:
3044 case SpvOpExtension
:
3045 /* Unhandled, but these are for debug so that's ok. */
3048 case SpvOpCapability
: {
3049 SpvCapability cap
= w
[1];
3051 case SpvCapabilityMatrix
:
3052 case SpvCapabilityShader
:
3053 case SpvCapabilityGeometry
:
3054 case SpvCapabilityGeometryPointSize
:
3055 case SpvCapabilityUniformBufferArrayDynamicIndexing
:
3056 case SpvCapabilitySampledImageArrayDynamicIndexing
:
3057 case SpvCapabilityStorageBufferArrayDynamicIndexing
:
3058 case SpvCapabilityStorageImageArrayDynamicIndexing
:
3059 case SpvCapabilityImageRect
:
3060 case SpvCapabilitySampledRect
:
3061 case SpvCapabilitySampled1D
:
3062 case SpvCapabilityImage1D
:
3063 case SpvCapabilitySampledCubeArray
:
3064 case SpvCapabilityImageCubeArray
:
3065 case SpvCapabilitySampledBuffer
:
3066 case SpvCapabilityImageBuffer
:
3067 case SpvCapabilityImageQuery
:
3068 case SpvCapabilityDerivativeControl
:
3069 case SpvCapabilityInterpolationFunction
:
3070 case SpvCapabilityMultiViewport
:
3071 case SpvCapabilitySampleRateShading
:
3072 case SpvCapabilityClipDistance
:
3073 case SpvCapabilityCullDistance
:
3074 case SpvCapabilityInputAttachment
:
3075 case SpvCapabilityImageGatherExtended
:
3076 case SpvCapabilityStorageImageExtendedFormats
:
3079 case SpvCapabilityGeometryStreams
:
3080 case SpvCapabilityLinkage
:
3081 case SpvCapabilityVector16
:
3082 case SpvCapabilityFloat16Buffer
:
3083 case SpvCapabilityFloat16
:
3084 case SpvCapabilityInt64Atomics
:
3085 case SpvCapabilityAtomicStorage
:
3086 case SpvCapabilityInt16
:
3087 case SpvCapabilityStorageImageMultisample
:
3088 case SpvCapabilityInt8
:
3089 case SpvCapabilitySparseResidency
:
3090 case SpvCapabilityMinLod
:
3091 case SpvCapabilityTransformFeedback
:
3092 vtn_warn("Unsupported SPIR-V capability: %s",
3093 spirv_capability_to_string(cap
));
3096 case SpvCapabilityFloat64
:
3097 spv_check_supported(float64
, cap
);
3099 case SpvCapabilityInt64
:
3100 spv_check_supported(int64
, cap
);
3103 case SpvCapabilityAddresses
:
3104 case SpvCapabilityKernel
:
3105 case SpvCapabilityImageBasic
:
3106 case SpvCapabilityImageReadWrite
:
3107 case SpvCapabilityImageMipmap
:
3108 case SpvCapabilityPipes
:
3109 case SpvCapabilityGroups
:
3110 case SpvCapabilityDeviceEnqueue
:
3111 case SpvCapabilityLiteralSampler
:
3112 case SpvCapabilityGenericPointer
:
3113 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
3114 spirv_capability_to_string(cap
));
3117 case SpvCapabilityImageMSArray
:
3118 spv_check_supported(image_ms_array
, cap
);
3121 case SpvCapabilityTessellation
:
3122 case SpvCapabilityTessellationPointSize
:
3123 spv_check_supported(tessellation
, cap
);
3126 case SpvCapabilityDrawParameters
:
3127 spv_check_supported(draw_parameters
, cap
);
3130 case SpvCapabilityStorageImageReadWithoutFormat
:
3131 spv_check_supported(image_read_without_format
, cap
);
3134 case SpvCapabilityStorageImageWriteWithoutFormat
:
3135 spv_check_supported(image_write_without_format
, cap
);
3138 case SpvCapabilityMultiView
:
3139 spv_check_supported(multiview
, cap
);
3142 case SpvCapabilityVariablePointersStorageBuffer
:
3143 case SpvCapabilityVariablePointers
:
3144 spv_check_supported(variable_pointers
, cap
);
3147 case SpvCapabilityStorageUniformBufferBlock16
:
3148 case SpvCapabilityStorageUniform16
:
3149 case SpvCapabilityStoragePushConstant16
:
3150 case SpvCapabilityStorageInputOutput16
:
3151 spv_check_supported(storage_16bit
, cap
);
3155 vtn_fail("Unhandled capability");
3160 case SpvOpExtInstImport
:
3161 vtn_handle_extension(b
, opcode
, w
, count
);
3164 case SpvOpMemoryModel
:
3165 vtn_assert(w
[1] == SpvAddressingModelLogical
);
3166 vtn_assert(w
[2] == SpvMemoryModelSimple
||
3167 w
[2] == SpvMemoryModelGLSL450
);
3170 case SpvOpEntryPoint
: {
3171 struct vtn_value
*entry_point
= &b
->values
[w
[2]];
3172 /* Let this be a name label regardless */
3173 unsigned name_words
;
3174 entry_point
->name
= vtn_string_literal(b
, &w
[3], count
- 3, &name_words
);
3176 if (strcmp(entry_point
->name
, b
->entry_point_name
) != 0 ||
3177 stage_for_execution_model(b
, w
[1]) != b
->entry_point_stage
)
3180 vtn_assert(b
->entry_point
== NULL
);
3181 b
->entry_point
= entry_point
;
3186 vtn_push_value(b
, w
[1], vtn_value_type_string
)->str
=
3187 vtn_string_literal(b
, &w
[2], count
- 2, NULL
);
3191 b
->values
[w
[1]].name
= vtn_string_literal(b
, &w
[2], count
- 2, NULL
);
3194 case SpvOpMemberName
:
3198 case SpvOpExecutionMode
:
3199 case SpvOpDecorationGroup
:
3201 case SpvOpMemberDecorate
:
3202 case SpvOpGroupDecorate
:
3203 case SpvOpGroupMemberDecorate
:
3204 vtn_handle_decoration(b
, opcode
, w
, count
);
3208 return false; /* End of preamble */
3215 vtn_handle_execution_mode(struct vtn_builder
*b
, struct vtn_value
*entry_point
,
3216 const struct vtn_decoration
*mode
, void *data
)
3218 vtn_assert(b
->entry_point
== entry_point
);
3220 switch(mode
->exec_mode
) {
3221 case SpvExecutionModeOriginUpperLeft
:
3222 case SpvExecutionModeOriginLowerLeft
:
3223 b
->origin_upper_left
=
3224 (mode
->exec_mode
== SpvExecutionModeOriginUpperLeft
);
3227 case SpvExecutionModeEarlyFragmentTests
:
3228 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
);
3229 b
->shader
->info
.fs
.early_fragment_tests
= true;
3232 case SpvExecutionModeInvocations
:
3233 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_GEOMETRY
);
3234 b
->shader
->info
.gs
.invocations
= MAX2(1, mode
->literals
[0]);
3237 case SpvExecutionModeDepthReplacing
:
3238 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
);
3239 b
->shader
->info
.fs
.depth_layout
= FRAG_DEPTH_LAYOUT_ANY
;
3241 case SpvExecutionModeDepthGreater
:
3242 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
);
3243 b
->shader
->info
.fs
.depth_layout
= FRAG_DEPTH_LAYOUT_GREATER
;
3245 case SpvExecutionModeDepthLess
:
3246 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
);
3247 b
->shader
->info
.fs
.depth_layout
= FRAG_DEPTH_LAYOUT_LESS
;
3249 case SpvExecutionModeDepthUnchanged
:
3250 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_FRAGMENT
);
3251 b
->shader
->info
.fs
.depth_layout
= FRAG_DEPTH_LAYOUT_UNCHANGED
;
3254 case SpvExecutionModeLocalSize
:
3255 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_COMPUTE
);
3256 b
->shader
->info
.cs
.local_size
[0] = mode
->literals
[0];
3257 b
->shader
->info
.cs
.local_size
[1] = mode
->literals
[1];
3258 b
->shader
->info
.cs
.local_size
[2] = mode
->literals
[2];
3260 case SpvExecutionModeLocalSizeHint
:
3261 break; /* Nothing to do with this */
3263 case SpvExecutionModeOutputVertices
:
3264 if (b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3265 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
) {
3266 b
->shader
->info
.tess
.tcs_vertices_out
= mode
->literals
[0];
3268 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_GEOMETRY
);
3269 b
->shader
->info
.gs
.vertices_out
= mode
->literals
[0];
3273 case SpvExecutionModeInputPoints
:
3274 case SpvExecutionModeInputLines
:
3275 case SpvExecutionModeInputLinesAdjacency
:
3276 case SpvExecutionModeTriangles
:
3277 case SpvExecutionModeInputTrianglesAdjacency
:
3278 case SpvExecutionModeQuads
:
3279 case SpvExecutionModeIsolines
:
3280 if (b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3281 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
) {
3282 b
->shader
->info
.tess
.primitive_mode
=
3283 gl_primitive_from_spv_execution_mode(b
, mode
->exec_mode
);
3285 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_GEOMETRY
);
3286 b
->shader
->info
.gs
.vertices_in
=
3287 vertices_in_from_spv_execution_mode(b
, mode
->exec_mode
);
3291 case SpvExecutionModeOutputPoints
:
3292 case SpvExecutionModeOutputLineStrip
:
3293 case SpvExecutionModeOutputTriangleStrip
:
3294 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_GEOMETRY
);
3295 b
->shader
->info
.gs
.output_primitive
=
3296 gl_primitive_from_spv_execution_mode(b
, mode
->exec_mode
);
3299 case SpvExecutionModeSpacingEqual
:
3300 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3301 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3302 b
->shader
->info
.tess
.spacing
= TESS_SPACING_EQUAL
;
3304 case SpvExecutionModeSpacingFractionalEven
:
3305 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3306 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3307 b
->shader
->info
.tess
.spacing
= TESS_SPACING_FRACTIONAL_EVEN
;
3309 case SpvExecutionModeSpacingFractionalOdd
:
3310 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3311 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3312 b
->shader
->info
.tess
.spacing
= TESS_SPACING_FRACTIONAL_ODD
;
3314 case SpvExecutionModeVertexOrderCw
:
3315 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3316 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3317 b
->shader
->info
.tess
.ccw
= false;
3319 case SpvExecutionModeVertexOrderCcw
:
3320 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3321 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3322 b
->shader
->info
.tess
.ccw
= true;
3324 case SpvExecutionModePointMode
:
3325 vtn_assert(b
->shader
->info
.stage
== MESA_SHADER_TESS_CTRL
||
3326 b
->shader
->info
.stage
== MESA_SHADER_TESS_EVAL
);
3327 b
->shader
->info
.tess
.point_mode
= true;
3330 case SpvExecutionModePixelCenterInteger
:
3331 b
->pixel_center_integer
= true;
3334 case SpvExecutionModeXfb
:
3335 vtn_fail("Unhandled execution mode");
3338 case SpvExecutionModeVecTypeHint
:
3339 case SpvExecutionModeContractionOff
:
3343 vtn_fail("Unhandled execution mode");
3348 vtn_handle_variable_or_type_instruction(struct vtn_builder
*b
, SpvOp opcode
,
3349 const uint32_t *w
, unsigned count
)
3351 vtn_set_instruction_result_type(b
, opcode
, w
, count
);
3355 case SpvOpSourceContinued
:
3356 case SpvOpSourceExtension
:
3357 case SpvOpExtension
:
3358 case SpvOpCapability
:
3359 case SpvOpExtInstImport
:
3360 case SpvOpMemoryModel
:
3361 case SpvOpEntryPoint
:
3362 case SpvOpExecutionMode
:
3365 case SpvOpMemberName
:
3366 case SpvOpDecorationGroup
:
3368 case SpvOpMemberDecorate
:
3369 case SpvOpGroupDecorate
:
3370 case SpvOpGroupMemberDecorate
:
3371 vtn_fail("Invalid opcode types and variables section");
3377 case SpvOpTypeFloat
:
3378 case SpvOpTypeVector
:
3379 case SpvOpTypeMatrix
:
3380 case SpvOpTypeImage
:
3381 case SpvOpTypeSampler
:
3382 case SpvOpTypeSampledImage
:
3383 case SpvOpTypeArray
:
3384 case SpvOpTypeRuntimeArray
:
3385 case SpvOpTypeStruct
:
3386 case SpvOpTypeOpaque
:
3387 case SpvOpTypePointer
:
3388 case SpvOpTypeFunction
:
3389 case SpvOpTypeEvent
:
3390 case SpvOpTypeDeviceEvent
:
3391 case SpvOpTypeReserveId
:
3392 case SpvOpTypeQueue
:
3394 vtn_handle_type(b
, opcode
, w
, count
);
3397 case SpvOpConstantTrue
:
3398 case SpvOpConstantFalse
:
3400 case SpvOpConstantComposite
:
3401 case SpvOpConstantSampler
:
3402 case SpvOpConstantNull
:
3403 case SpvOpSpecConstantTrue
:
3404 case SpvOpSpecConstantFalse
:
3405 case SpvOpSpecConstant
:
3406 case SpvOpSpecConstantComposite
:
3407 case SpvOpSpecConstantOp
:
3408 vtn_handle_constant(b
, opcode
, w
, count
);
3413 vtn_handle_variables(b
, opcode
, w
, count
);
3417 return false; /* End of preamble */
3424 vtn_handle_body_instruction(struct vtn_builder
*b
, SpvOp opcode
,
3425 const uint32_t *w
, unsigned count
)
3431 case SpvOpLoopMerge
:
3432 case SpvOpSelectionMerge
:
3433 /* This is handled by cfg pre-pass and walk_blocks */
3437 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_undef
);
3438 val
->type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
3443 vtn_handle_extension(b
, opcode
, w
, count
);
3449 case SpvOpCopyMemory
:
3450 case SpvOpCopyMemorySized
:
3451 case SpvOpAccessChain
:
3452 case SpvOpPtrAccessChain
:
3453 case SpvOpInBoundsAccessChain
:
3454 case SpvOpArrayLength
:
3455 vtn_handle_variables(b
, opcode
, w
, count
);
3458 case SpvOpFunctionCall
:
3459 vtn_handle_function_call(b
, opcode
, w
, count
);
3462 case SpvOpSampledImage
:
3464 case SpvOpImageSampleImplicitLod
:
3465 case SpvOpImageSampleExplicitLod
:
3466 case SpvOpImageSampleDrefImplicitLod
:
3467 case SpvOpImageSampleDrefExplicitLod
:
3468 case SpvOpImageSampleProjImplicitLod
:
3469 case SpvOpImageSampleProjExplicitLod
:
3470 case SpvOpImageSampleProjDrefImplicitLod
:
3471 case SpvOpImageSampleProjDrefExplicitLod
:
3472 case SpvOpImageFetch
:
3473 case SpvOpImageGather
:
3474 case SpvOpImageDrefGather
:
3475 case SpvOpImageQuerySizeLod
:
3476 case SpvOpImageQueryLod
:
3477 case SpvOpImageQueryLevels
:
3478 case SpvOpImageQuerySamples
:
3479 vtn_handle_texture(b
, opcode
, w
, count
);
3482 case SpvOpImageRead
:
3483 case SpvOpImageWrite
:
3484 case SpvOpImageTexelPointer
:
3485 vtn_handle_image(b
, opcode
, w
, count
);
3488 case SpvOpImageQuerySize
: {
3489 struct vtn_pointer
*image
=
3490 vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
3491 if (image
->mode
== vtn_variable_mode_image
) {
3492 vtn_handle_image(b
, opcode
, w
, count
);
3494 vtn_assert(image
->mode
== vtn_variable_mode_sampler
);
3495 vtn_handle_texture(b
, opcode
, w
, count
);
3500 case SpvOpAtomicLoad
:
3501 case SpvOpAtomicExchange
:
3502 case SpvOpAtomicCompareExchange
:
3503 case SpvOpAtomicCompareExchangeWeak
:
3504 case SpvOpAtomicIIncrement
:
3505 case SpvOpAtomicIDecrement
:
3506 case SpvOpAtomicIAdd
:
3507 case SpvOpAtomicISub
:
3508 case SpvOpAtomicSMin
:
3509 case SpvOpAtomicUMin
:
3510 case SpvOpAtomicSMax
:
3511 case SpvOpAtomicUMax
:
3512 case SpvOpAtomicAnd
:
3514 case SpvOpAtomicXor
: {
3515 struct vtn_value
*pointer
= vtn_untyped_value(b
, w
[3]);
3516 if (pointer
->value_type
== vtn_value_type_image_pointer
) {
3517 vtn_handle_image(b
, opcode
, w
, count
);
3519 vtn_assert(pointer
->value_type
== vtn_value_type_pointer
);
3520 vtn_handle_ssbo_or_shared_atomic(b
, opcode
, w
, count
);
3525 case SpvOpAtomicStore
: {
3526 struct vtn_value
*pointer
= vtn_untyped_value(b
, w
[1]);
3527 if (pointer
->value_type
== vtn_value_type_image_pointer
) {
3528 vtn_handle_image(b
, opcode
, w
, count
);
3530 vtn_assert(pointer
->value_type
== vtn_value_type_pointer
);
3531 vtn_handle_ssbo_or_shared_atomic(b
, opcode
, w
, count
);
3537 /* Handle OpSelect up-front here because it needs to be able to handle
3538 * pointers and not just regular vectors and scalars.
3540 struct vtn_value
*res_val
= vtn_untyped_value(b
, w
[2]);
3541 struct vtn_value
*sel_val
= vtn_untyped_value(b
, w
[3]);
3542 struct vtn_value
*obj1_val
= vtn_untyped_value(b
, w
[4]);
3543 struct vtn_value
*obj2_val
= vtn_untyped_value(b
, w
[5]);
3545 const struct glsl_type
*sel_type
;
3546 switch (res_val
->type
->base_type
) {
3547 case vtn_base_type_scalar
:
3548 sel_type
= glsl_bool_type();
3550 case vtn_base_type_vector
:
3551 sel_type
= glsl_vector_type(GLSL_TYPE_BOOL
, res_val
->type
->length
);
3553 case vtn_base_type_pointer
:
3554 /* We need to have actual storage for pointer types */
3555 vtn_fail_if(res_val
->type
->type
== NULL
,
3556 "Invalid pointer result type for OpSelect");
3557 sel_type
= glsl_bool_type();
3560 vtn_fail("Result type of OpSelect must be a scalar, vector, or pointer");
3563 if (unlikely(sel_val
->type
->type
!= sel_type
)) {
3564 if (sel_val
->type
->type
== glsl_bool_type()) {
3565 /* This case is illegal but some older versions of GLSLang produce
3566 * it. The GLSLang issue was fixed on March 30, 2017:
3568 * https://github.com/KhronosGroup/glslang/issues/809
3570 * Unfortunately, there are applications in the wild which are
3571 * shipping with this bug so it isn't nice to fail on them so we
3572 * throw a warning instead. It's not actually a problem for us as
3573 * nir_builder will just splat the condition out which is most
3574 * likely what the client wanted anyway.
3576 vtn_warn("Condition type of OpSelect must have the same number "
3577 "of components as Result Type");
3579 vtn_fail("Condition type of OpSelect must be a scalar or vector "
3580 "of Boolean type. It must have the same number of "
3581 "components as Result Type");
3585 vtn_fail_if(obj1_val
->type
!= res_val
->type
||
3586 obj2_val
->type
!= res_val
->type
,
3587 "Object types must match the result type in OpSelect");
3589 struct vtn_type
*res_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
3590 struct vtn_ssa_value
*ssa
= vtn_create_ssa_value(b
, res_type
->type
);
3591 ssa
->def
= nir_bcsel(&b
->nb
, vtn_ssa_value(b
, w
[3])->def
,
3592 vtn_ssa_value(b
, w
[4])->def
,
3593 vtn_ssa_value(b
, w
[5])->def
);
3594 vtn_push_ssa(b
, w
[2], res_type
, ssa
);
3603 case SpvOpConvertFToU
:
3604 case SpvOpConvertFToS
:
3605 case SpvOpConvertSToF
:
3606 case SpvOpConvertUToF
:
3610 case SpvOpQuantizeToF16
:
3611 case SpvOpConvertPtrToU
:
3612 case SpvOpConvertUToPtr
:
3613 case SpvOpPtrCastToGeneric
:
3614 case SpvOpGenericCastToPtr
:
3620 case SpvOpSignBitSet
:
3621 case SpvOpLessOrGreater
:
3623 case SpvOpUnordered
:
3638 case SpvOpVectorTimesScalar
:
3640 case SpvOpIAddCarry
:
3641 case SpvOpISubBorrow
:
3642 case SpvOpUMulExtended
:
3643 case SpvOpSMulExtended
:
3644 case SpvOpShiftRightLogical
:
3645 case SpvOpShiftRightArithmetic
:
3646 case SpvOpShiftLeftLogical
:
3647 case SpvOpLogicalEqual
:
3648 case SpvOpLogicalNotEqual
:
3649 case SpvOpLogicalOr
:
3650 case SpvOpLogicalAnd
:
3651 case SpvOpLogicalNot
:
3652 case SpvOpBitwiseOr
:
3653 case SpvOpBitwiseXor
:
3654 case SpvOpBitwiseAnd
:
3656 case SpvOpFOrdEqual
:
3657 case SpvOpFUnordEqual
:
3658 case SpvOpINotEqual
:
3659 case SpvOpFOrdNotEqual
:
3660 case SpvOpFUnordNotEqual
:
3661 case SpvOpULessThan
:
3662 case SpvOpSLessThan
:
3663 case SpvOpFOrdLessThan
:
3664 case SpvOpFUnordLessThan
:
3665 case SpvOpUGreaterThan
:
3666 case SpvOpSGreaterThan
:
3667 case SpvOpFOrdGreaterThan
:
3668 case SpvOpFUnordGreaterThan
:
3669 case SpvOpULessThanEqual
:
3670 case SpvOpSLessThanEqual
:
3671 case SpvOpFOrdLessThanEqual
:
3672 case SpvOpFUnordLessThanEqual
:
3673 case SpvOpUGreaterThanEqual
:
3674 case SpvOpSGreaterThanEqual
:
3675 case SpvOpFOrdGreaterThanEqual
:
3676 case SpvOpFUnordGreaterThanEqual
:
3682 case SpvOpFwidthFine
:
3683 case SpvOpDPdxCoarse
:
3684 case SpvOpDPdyCoarse
:
3685 case SpvOpFwidthCoarse
:
3686 case SpvOpBitFieldInsert
:
3687 case SpvOpBitFieldSExtract
:
3688 case SpvOpBitFieldUExtract
:
3689 case SpvOpBitReverse
:
3691 case SpvOpTranspose
:
3692 case SpvOpOuterProduct
:
3693 case SpvOpMatrixTimesScalar
:
3694 case SpvOpVectorTimesMatrix
:
3695 case SpvOpMatrixTimesVector
:
3696 case SpvOpMatrixTimesMatrix
:
3697 vtn_handle_alu(b
, opcode
, w
, count
);
3700 case SpvOpVectorExtractDynamic
:
3701 case SpvOpVectorInsertDynamic
:
3702 case SpvOpVectorShuffle
:
3703 case SpvOpCompositeConstruct
:
3704 case SpvOpCompositeExtract
:
3705 case SpvOpCompositeInsert
:
3706 case SpvOpCopyObject
:
3707 vtn_handle_composite(b
, opcode
, w
, count
);
3710 case SpvOpEmitVertex
:
3711 case SpvOpEndPrimitive
:
3712 case SpvOpEmitStreamVertex
:
3713 case SpvOpEndStreamPrimitive
:
3714 case SpvOpControlBarrier
:
3715 case SpvOpMemoryBarrier
:
3716 vtn_handle_barrier(b
, opcode
, w
, count
);
3720 vtn_fail("Unhandled opcode");
3727 spirv_to_nir(const uint32_t *words
, size_t word_count
,
3728 struct nir_spirv_specialization
*spec
, unsigned num_spec
,
3729 gl_shader_stage stage
, const char *entry_point_name
,
3730 const struct spirv_to_nir_options
*options
,
3731 const nir_shader_compiler_options
*nir_options
)
3733 /* Initialize the stn_builder object */
3734 struct vtn_builder
*b
= rzalloc(NULL
, struct vtn_builder
);
3736 b
->spirv_word_count
= word_count
;
3740 exec_list_make_empty(&b
->functions
);
3741 b
->entry_point_stage
= stage
;
3742 b
->entry_point_name
= entry_point_name
;
3743 b
->options
= options
;
3745 /* See also _vtn_fail() */
3746 if (setjmp(b
->fail_jump
)) {
3751 const uint32_t *word_end
= words
+ word_count
;
3753 /* Handle the SPIR-V header (first 4 dwords) */
3754 vtn_assert(word_count
> 5);
3756 vtn_assert(words
[0] == SpvMagicNumber
);
3757 vtn_assert(words
[1] >= 0x10000);
3758 /* words[2] == generator magic */
3759 unsigned value_id_bound
= words
[3];
3760 vtn_assert(words
[4] == 0);
3764 b
->value_id_bound
= value_id_bound
;
3765 b
->values
= rzalloc_array(b
, struct vtn_value
, value_id_bound
);
3767 /* Handle all the preamble instructions */
3768 words
= vtn_foreach_instruction(b
, words
, word_end
,
3769 vtn_handle_preamble_instruction
);
3771 if (b
->entry_point
== NULL
) {
3772 vtn_fail("Entry point not found");
3777 b
->shader
= nir_shader_create(b
, stage
, nir_options
, NULL
);
3779 /* Set shader info defaults */
3780 b
->shader
->info
.gs
.invocations
= 1;
3782 /* Parse execution modes */
3783 vtn_foreach_execution_mode(b
, b
->entry_point
,
3784 vtn_handle_execution_mode
, NULL
);
3786 b
->specializations
= spec
;
3787 b
->num_specializations
= num_spec
;
3789 /* Handle all variable, type, and constant instructions */
3790 words
= vtn_foreach_instruction(b
, words
, word_end
,
3791 vtn_handle_variable_or_type_instruction
);
3793 /* Set types on all vtn_values */
3794 vtn_foreach_instruction(b
, words
, word_end
, vtn_set_instruction_result_type
);
3796 vtn_build_cfg(b
, words
, word_end
);
3798 assert(b
->entry_point
->value_type
== vtn_value_type_function
);
3799 b
->entry_point
->func
->referenced
= true;
3804 foreach_list_typed(struct vtn_function
, func
, node
, &b
->functions
) {
3805 if (func
->referenced
&& !func
->emitted
) {
3806 b
->const_table
= _mesa_hash_table_create(b
, _mesa_hash_pointer
,
3807 _mesa_key_pointer_equal
);
3809 vtn_function_emit(b
, func
, vtn_handle_body_instruction
);
3815 vtn_assert(b
->entry_point
->value_type
== vtn_value_type_function
);
3816 nir_function
*entry_point
= b
->entry_point
->func
->impl
->function
;
3817 vtn_assert(entry_point
);
3819 /* Unparent the shader from the vtn_builder before we delete the builder */
3820 ralloc_steal(NULL
, b
->shader
);