/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

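/* Illustrative sketch (not part of this file): a driver can observe these
 * messages by installing a callback in the spirv_to_nir options.  The
 * callback signature below is inferred from the call made in vtn_log()
 * above, and my_debug_cb is a hypothetical name.
 *
 *    static void my_debug_cb(void *private_data,
 *                            enum nir_spirv_debug_level level,
 *                            size_t spirv_offset, const char *message)
 *    {
 *       fprintf(stderr, "[spirv @ %zu] %s\n", spirv_offset, message);
 *    }
 *
 *    options.debug.func = my_debug_cb;
 *    options.debug.private_data = NULL;
 */
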
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

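/* The vtn_warn() calls in the decoration handlers below are assumed to come
 * from a convenience macro (presumably in vtn_private.h) that captures b,
 * __FILE__, and __LINE__ from the caller and forwards them to _vtn_warn(),
 * roughly:
 *
 *    #define vtn_warn(fmt, ...) \
 *       _vtn_warn(b, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 */
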
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      unreachable("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}

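/* Example: the SPIR-V string "abc" occupies one 32-bit word (the bytes
 * 'a', 'b', 'c', '\0' packed in little-endian order), so *words_used is
 * DIV_ROUND_UP(4, 4) == 1.  "abcd" needs a second word for its
 * terminating NUL, giving *words_used == 2.
 */
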
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return end;
}

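/* Each SPIR-V instruction starts with a word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count.  For example,
 * an OpTypeVoid defining %1 is encoded as:
 *
 *    0x00020013   (word count = 2, opcode = 19 = OpTypeVoid)
 *    0x00000001   (result id %1)
 */
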
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         unreachable("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

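/* For example, given a module containing:
 *
 *    OpDecorate %group BuiltIn Position
 *    %group = OpDecorationGroup
 *    OpGroupDecorate %group %a %b
 *
 * iterating over the decorations of %a or %b invokes the callback with the
 * BuiltIn decoration that was attached to %group.
 */
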
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}

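/* The shallow copies above give copy-on-write semantics: two structs can
 * share the same vtn_type for a member until a member decoration such as
 * RowMajor or MatrixStride needs to modify it, at which point the chain of
 * types from the member down to the matrix itself is cloned so the change
 * does not leak into other users of the type.
 */
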
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;
   assert(member >= 0);

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}

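/* In SPIR-V, MatrixStride is the stride between rows for a RowMajor matrix
 * and between columns for a ColMajor one.  Since vtn models a matrix as an
 * array of column vectors, the row-major case above swaps the two strides:
 * the declared MatrixStride lands on the vector (array element) type and
 * the old vector stride becomes the matrix's own stride.
 */
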
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      assert(type->base_type == vtn_base_type_matrix ||
             type->base_type == vtn_base_type_array ||
             type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      unreachable("Invalid image format");
      return 0;
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            unreachable("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         unreachable("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}

static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}

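/* Example: for a module containing
 *
 *    OpDecorate %n SpecId 7
 *    %n = OpSpecConstant %uint 64
 *
 * a caller that supplies a specialization entry { .id = 7, .data32 = 128 }
 * gets 128 back from get_specialization(); with no matching entry the
 * default literal (64) passed in as const_value is returned unchanged.
 */
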
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}

static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }
   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }
      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         assert(v0->value_type == vtn_value_type_constant ||
                v0->value_type == vtn_value_type_undef);
         assert(v1->value_type == vtn_value_type_constant ||
                v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               break;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               break;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               break;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      unreachable("OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

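/* Example: for a struct { vec4 a; float b[2]; } this builds a two-element
 * tree: elems[0] is a vector value (its def filled in later) and elems[1]
 * is itself an aggregate with two scalar leaves.  The only non-aggregate
 * type that reaches the scalar/vector base-type cases above is a matrix,
 * which is why they ask for the column type.
 */
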
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.type = sampled_val->pointer->type;
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type = sampled.type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
   case GLSL_TYPE_INT:   instr->dest_type = nir_type_int;   break;
   case GLSL_TYPE_UINT:  instr->dest_type = nir_type_uint;  break;
   case GLSL_TYPE_BOOL:  instr->dest_type = nir_type_bool;  break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}

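/* Example: a 2-component coordinate (x, y) becomes (x, y, y, y), since
 * swizzle[i] = MIN2(i, dim - 1) clamps out-of-range components to the last
 * valid one; only the first `dim` components are meaningful to the
 * consuming intrinsic.
 */
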
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }
   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;

   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
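/* Note how SpvOpAtomicIIncrement, SpvOpAtomicIDecrement, and SpvOpAtomicISub
 * all map to the single atomic_add intrinsic above: fill_common_atomic_sources
 * supplies an immediate 1, an immediate -1, or the negated operand
 * respectively, so no separate increment/decrement/subtract intrinsics are
 * needed.
 */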
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
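/* The two helpers above are nearly identical because signedness lives in the
 * same place on both sides: SPIR-V encodes it in the opcode (SMin vs. UMin)
 * and NIR does the same (atomic_imin vs. atomic_umin), so the mapping is
 * direct.  They differ in addressing: shared (workgroup) atomics operate on
 * variable derefs, while SSBO atomics take explicit index/offset sources, as
 * handled below.
 */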
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /* Currently unused; note that for OpAtomicStore the scope/semantics
    * words are at different indices.
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (ptr->mode == vtn_variable_mode_workgroup) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(ptr->mode == vtn_variable_mode_ssbo);
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
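/* There is no vec1 ALU opcode in NIR at this point, so the single-component
 * case above falls back to fmov; for 2-4 components the matching vecN opcode
 * is used and the write mask covers every component.
 */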
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }

      nir_builder_instr_insert(&b->nb, &vec->instr);

      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
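/* The loop above lowers a dynamically indexed extract to a chain of selects.
 * For a vec4 source it is equivalent to:
 *
 *    dest = bcsel(index == 3, src.w,
 *              bcsel(index == 2, src.z,
 *                 bcsel(index == 1, src.y, src.x)));
 */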
static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
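/* A component index of 0xffffffff corresponds to an undefined (0xFFFFFFFF)
 * component literal in OpVectorShuffle, which the SPIR-V spec explicitly
 * allows; such a result component has no source, hence the undef above.
 * Indices past the end of src0 select from src1, so the swizzle is rebased.
 */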
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
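/* Example: constructing a vec4 from (vec2 a, float b, float c) produces four
 * single-component sources: a.x, a.y, b, and c, each selected with a
 * one-element swizzle by the inner loop above.
 */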
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */

      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
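/* Example: OpCompositeInsert with indices (2, 1) into an array of vec4 walks
 * to element 2 (a vec4) in the loop above; the final index then selects
 * component 1, taking the vtn_vector_insert() path.
 */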
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */

         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      unreachable("Invalid primitive type");
   }
}
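/* The raw numeric returns above are GL primitive enum values (GL_POINTS,
 * GL_TRIANGLES, ...), written out as literals, presumably so this translator
 * doesn't need to pull in the GL headers for a handful of constants.
 */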
static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      unreachable("Invalid GS input mode");
   }
}
static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
#define spv_check_supported(name, cap) do {            \
      if (!(b->options && b->options->caps.name))      \
         vtn_warn("Unsupported SPIR-V capability: %s", \
                  spirv_capability_to_string(cap));    \
   } while(0)
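/* Capabilities checked with spv_check_supported() only warn when the driver
 * hasn't opted in; translation continues on a best-effort basis rather than
 * failing hard at this point.
 */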
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         break;

      default:
         unreachable("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelSimple ||
             w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      unreachable("Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      unreachable("Unhandled execution mode");
   }
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      unreachable("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect: {
      /* Handle OpSelect up-front here because it needs to be able to handle
       * pointers and not just regular vectors and scalars.
       */
      struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
      ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
                           vtn_ssa_value(b, w[4])->def,
                           vtn_ssa_value(b, w[5])->def);
      vtn_push_ssa(b, w[2], res_type, ssa);
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->spirv = words;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = options;

   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                                     _mesa_key_pointer_equal);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   ralloc_free(b);

   return entry_point;
}
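/* Example driver-side invocation (illustrative sketch only; the variable
 * names and surrounding setup are hypothetical):
 *
 *    nir_function *entry =
 *       spirv_to_nir(spirv_words, spirv_word_count,
 *                    NULL, 0,              // no specialization constants
 *                    MESA_SHADER_FRAGMENT, "main",
 *                    &spirv_options, nir_compiler_options);
 *
 * On success the returned entry point belongs to a freshly created
 * nir_shader that has been unparented from the builder above, so the caller
 * owns its lifetime.
 */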