/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

#include <stdio.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

   ralloc_asprintf_append(&msg, " In file %s:%u\n", file, line);

   ralloc_asprintf_append(&msg, " ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
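
/* vtn_fail()/_vtn_fail() never returns: after logging the error (and
 * dumping the binary when MESA_SPIRV_FAIL_DUMP_PATH is set), it longjmp()s
 * to b->fail_jump, so the handlers below report errors through
 * vtn_fail()/vtn_fail_if() instead of threading error returns through
 * every call.
 */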
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer: {
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;
   }

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
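
/* Each SPIR-V instruction starts with one word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count of the
 * instruction (including that first word); that is how the loop below
 * advances through the binary.
 */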
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return w;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp((const char *)&w[2], "SPV_AMD_gcn_shader") == 0)
                 && (b->options && b->options->exts.AMD_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else {
         vtn_fail("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail("Unhandled opcode");
   }

   return true;
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
/* does a shallow copy of a vtn_type */
static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
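
/* Struct-member decorations are applied through vtn_foreach_decoration with
 * a member_decoration_ctx: interpolation, location, offset, etc. land either
 * in the glsl_struct_field array used to build the glsl_type or in the
 * corresponding vtn_type member, which is copied first (see
 * mutable_matrix_member/vtn_type_copy above) so shared type objects are not
 * mutated in place.
 */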
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}
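
/* type_decoration_cb handles decorations that apply to a type as a whole
 * (member == -1); per-member decorations were already consumed by the
 * struct-member callbacks above.
 */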
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_matrix ||
                 type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      vtn_fail("Unhandled decoration");
   }
}
static unsigned
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      vtn_fail("Invalid image format");
   }
}
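
/* std430 layout in a nutshell, as implemented below: scalars align to their
 * own size, vec2 to 2x and vec3/vec4 to 4x the component size, arrays and
 * matrices get a stride of align(elem_size, elem_align), and each struct
 * member is placed at the next multiple of its alignment.  For example, with
 * 32-bit components, struct { float f; vec3 v; } lays out as f at offset 0,
 * v at offset 16 (vec3 aligns like vec4), total size 28, alignment 16.
 */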
static struct vtn_type *
vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
                       uint32_t *size_out, uint32_t *align_out)
{
   switch (type->base_type) {
   case vtn_base_type_scalar: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      *size_out = comp_size;
      *align_out = comp_size;
      return type;
   }

   case vtn_base_type_vector: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      assert(type->length > 0 && type->length <= 4);
      unsigned align_comps = type->length == 3 ? 4 : type->length;
      *size_out = comp_size * type->length;
      *align_out = comp_size * align_comps;
      return type;
   }

   case vtn_base_type_matrix:
   case vtn_base_type_array: {
      /* We're going to add an array stride */
      type = vtn_type_copy(b, type);
      uint32_t elem_size, elem_align;
      type->array_element = vtn_type_layout_std430(b, type->array_element,
                                                   &elem_size, &elem_align);
      type->stride = vtn_align_u32(elem_size, elem_align);
      *size_out = type->stride * type->length;
      *align_out = elem_align;
      return type;
   }

   case vtn_base_type_struct: {
      /* We're going to add member offsets */
      type = vtn_type_copy(b, type);
      uint32_t offset = 0;
      uint32_t align = 0;
      for (unsigned i = 0; i < type->length; i++) {
         uint32_t mem_size, mem_align;
         type->members[i] = vtn_type_layout_std430(b, type->members[i],
                                                   &mem_size, &mem_align);
         offset = vtn_align_u32(offset, mem_align);
         type->offsets[i] = offset;
         offset += mem_size;
         align = MAX2(align, mem_align);
      }
      *size_out = offset;
      *align_out = align;
      return type;
   }

   default:
      unreachable("Invalid SPIR-V type for std430");
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->id = w[1];

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size");
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if(elems < 2 || elems > 4,
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }

      if (storage_class == SpvStorageClassWorkgroup &&
          b->options->lower_workgroup_access_to_offsets) {
         uint32_t size, align;
         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
                                                   &size, &align);
         val->type->length = size;
         val->type->align = align;
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         vtn_assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
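
/* OpConstantNull is handled by recursively building a zero-initialized
 * nir_constant of the right shape; rzalloc() already gives zeroed storage,
 * so the scalar and vector cases need no explicit stores.
 */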
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      vtn_assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
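
/* Specialization: when the driver supplies a value for the SpecId a constant
 * is decorated with, get_specialization()/get_specialization64() below
 * return that value in place of the default encoded in the module.
 */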
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}
static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
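
/* vtn_handle_constant() below covers OpConstant*, OpSpecConstant*, composite
 * and null constants.  OpSpecConstantOp is folded at parse time by mapping
 * the embedded opcode to a NIR ALU op and running nir_eval_const_opcode() on
 * the already-known constant operands.
 */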
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values->u32[0] = w[3];
         break;
      case 16:
         val->constant->values->u16[0] = w[3];
         break;
      case 8:
         val->constant->values->u8[0] = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         break;
      case 32:
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
         break;
      case 16:
         val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
         break;
      case 8:
         val->constant->values[0].u8[0] = get_specialization(b, val, w[3]);
         break;
      default:
         vtn_fail("Unsupported SpvOpSpecConstant bit size");
      }
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         int bit_size = glsl_get_bit_size(val->type->type);
         for (unsigned i = 0; i < elem_count; i++) {
            switch (bit_size) {
            case 64:
               val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               break;
            case 32:
               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               break;
            case 16:
               val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
               break;
            case 8:
               val->constant->values[0].u8[i] = elems[i]->values[0].u8[0];
               break;
            default:
               vtn_fail("Invalid SpvOpConstantComposite bit size");
            }
         }
         break;
      }

      case vtn_base_type_matrix:
         assert(glsl_type_is_matrix(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;

      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            /* This is for both 32-bit and 16-bit values */
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
               assert(col == 0 && elem == -1);
               col = w[i];
               elem = 0;
               type = type->array_element;
               break;

            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                  switch (bit_size) {
                  case 64:
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                     break;
                  case 32:
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                     break;
                  case 16:
                     val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
                     break;
                  case 8:
                     val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeExtract bit size");
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                  switch (bit_size) {
                  case 64:
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                     break;
                  case 32:
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                     break;
                  case 16:
                     (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
                     break;
                  case 8:
                     (*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i];
                     break;
                  default:
                     vtn_fail("Invalid SpvOpCompositeInsert bit size");
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(
                  vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[4];

         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      } /* default */
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
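
/* Function calls: pointer arguments whose pointee type has no glsl_type are
 * passed as variable derefs; every other argument is stored into a fresh
 * local temporary whose deref becomes the call parameter, and a non-void
 * return value comes back through a temporary hooked up to
 * call->return_deref.
 */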
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   vtn_assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
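
/* vtn_handle_texture() maps the SPIR-V image sampling and query opcodes onto
 * nir_texop values, gathers coordinate, comparator, LOD, gradient, offset
 * and sample-index sources from the optional image operands, and then emits
 * a nir_tex_instr.
 */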
1866 vtn_handle_texture(struct vtn_builder
*b
, SpvOp opcode
,
1867 const uint32_t *w
, unsigned count
)
1869 if (opcode
== SpvOpSampledImage
) {
1870 struct vtn_value
*val
=
1871 vtn_push_value(b
, w
[2], vtn_value_type_sampled_image
);
1872 val
->sampled_image
= ralloc(b
, struct vtn_sampled_image
);
1873 val
->sampled_image
->type
=
1874 vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
1875 val
->sampled_image
->image
=
1876 vtn_value(b
, w
[3], vtn_value_type_pointer
)->pointer
;
1877 val
->sampled_image
->sampler
=
1878 vtn_value(b
, w
[4], vtn_value_type_pointer
)->pointer
;
1880 } else if (opcode
== SpvOpImage
) {
1881 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_pointer
);
1882 struct vtn_value
*src_val
= vtn_untyped_value(b
, w
[3]);
1883 if (src_val
->value_type
== vtn_value_type_sampled_image
) {
1884 val
->pointer
= src_val
->sampled_image
->image
;
1886 vtn_assert(src_val
->value_type
== vtn_value_type_pointer
);
1887 val
->pointer
= src_val
->pointer
;
1892 struct vtn_type
*ret_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
1893 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1895 struct vtn_sampled_image sampled
;
1896 struct vtn_value
*sampled_val
= vtn_untyped_value(b
, w
[3]);
1897 if (sampled_val
->value_type
== vtn_value_type_sampled_image
) {
1898 sampled
= *sampled_val
->sampled_image
;
1900 vtn_assert(sampled_val
->value_type
== vtn_value_type_pointer
);
1901 sampled
.type
= sampled_val
->pointer
->type
;
1902 sampled
.image
= NULL
;
1903 sampled
.sampler
= sampled_val
->pointer
;
1906 const struct glsl_type
*image_type
= sampled
.type
->type
;
1907 const enum glsl_sampler_dim sampler_dim
= glsl_get_sampler_dim(image_type
);
1908 const bool is_array
= glsl_sampler_type_is_array(image_type
);
1909 const bool is_shadow
= glsl_sampler_type_is_shadow(image_type
);
1911 /* Figure out the base texture operation */
1914 case SpvOpImageSampleImplicitLod
:
1915 case SpvOpImageSampleDrefImplicitLod
:
1916 case SpvOpImageSampleProjImplicitLod
:
1917 case SpvOpImageSampleProjDrefImplicitLod
:
1918 texop
= nir_texop_tex
;
1921 case SpvOpImageSampleExplicitLod
:
1922 case SpvOpImageSampleDrefExplicitLod
:
1923 case SpvOpImageSampleProjExplicitLod
:
1924 case SpvOpImageSampleProjDrefExplicitLod
:
1925 texop
= nir_texop_txl
;
1928 case SpvOpImageFetch
:
1929 if (glsl_get_sampler_dim(image_type
) == GLSL_SAMPLER_DIM_MS
) {
1930 texop
= nir_texop_txf_ms
;
1932 texop
= nir_texop_txf
;
1936 case SpvOpImageGather
:
1937 case SpvOpImageDrefGather
:
1938 texop
= nir_texop_tg4
;
1941 case SpvOpImageQuerySizeLod
:
1942 case SpvOpImageQuerySize
:
1943 texop
= nir_texop_txs
;
1946 case SpvOpImageQueryLod
:
1947 texop
= nir_texop_lod
;
1950 case SpvOpImageQueryLevels
:
1951 texop
= nir_texop_query_levels
;
1954 case SpvOpImageQuerySamples
:
1955 texop
= nir_texop_texture_samples
;
1959 vtn_fail("Unhandled opcode");
1962 nir_tex_src srcs
[8]; /* 8 should be enough */
1963 nir_tex_src
*p
= srcs
;
1967 struct nir_ssa_def
*coord
;
1968 unsigned coord_components
;
1970 case SpvOpImageSampleImplicitLod
:
1971 case SpvOpImageSampleExplicitLod
:
1972 case SpvOpImageSampleDrefImplicitLod
:
1973 case SpvOpImageSampleDrefExplicitLod
:
1974 case SpvOpImageSampleProjImplicitLod
:
1975 case SpvOpImageSampleProjExplicitLod
:
1976 case SpvOpImageSampleProjDrefImplicitLod
:
1977 case SpvOpImageSampleProjDrefExplicitLod
:
1978 case SpvOpImageFetch
:
1979 case SpvOpImageGather
:
1980 case SpvOpImageDrefGather
:
1981 case SpvOpImageQueryLod
: {
1982 /* All these types have the coordinate as their first real argument */
1983 switch (sampler_dim
) {
1984 case GLSL_SAMPLER_DIM_1D
:
1985 case GLSL_SAMPLER_DIM_BUF
:
1986 coord_components
= 1;
1988 case GLSL_SAMPLER_DIM_2D
:
1989 case GLSL_SAMPLER_DIM_RECT
:
1990 case GLSL_SAMPLER_DIM_MS
:
1991 coord_components
= 2;
1993 case GLSL_SAMPLER_DIM_3D
:
1994 case GLSL_SAMPLER_DIM_CUBE
:
1995 coord_components
= 3;
1998 vtn_fail("Invalid sampler type");
2001 if (is_array
&& texop
!= nir_texop_lod
)
2004 coord
= vtn_ssa_value(b
, w
[idx
++])->def
;
2005 p
->src
= nir_src_for_ssa(nir_channels(&b
->nb
, coord
,
2006 (1 << coord_components
) - 1));
2007 p
->src_type
= nir_tex_src_coord
;
2014 coord_components
= 0;
2019 case SpvOpImageSampleProjImplicitLod
:
2020 case SpvOpImageSampleProjExplicitLod
:
2021 case SpvOpImageSampleProjDrefImplicitLod
:
2022 case SpvOpImageSampleProjDrefExplicitLod
:
2023 /* These have the projector as the last coordinate component */
2024 p
->src
= nir_src_for_ssa(nir_channel(&b
->nb
, coord
, coord_components
));
2025 p
->src_type
= nir_tex_src_projector
;
2033 unsigned gather_component
= 0;
2035 case SpvOpImageSampleDrefImplicitLod
:
2036 case SpvOpImageSampleDrefExplicitLod
:
2037 case SpvOpImageSampleProjDrefImplicitLod
:
2038 case SpvOpImageSampleProjDrefExplicitLod
:
2039 case SpvOpImageDrefGather
:
2040 /* These all have an explicit depth value as their next source */
2041 (*p
++) = vtn_tex_src(b
, w
[idx
++], nir_tex_src_comparator
);
2044 case SpvOpImageGather
:
2045 /* This has a component as its next source */
2047 vtn_value(b
, w
[idx
++], vtn_value_type_constant
)->constant
->values
[0].u32
[0];
2054 /* For OpImageQuerySizeLod, we always have an LOD */
2055 if (opcode
== SpvOpImageQuerySizeLod
)
2056 (*p
++) = vtn_tex_src(b
, w
[idx
++], nir_tex_src_lod
);
   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }

   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);
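
   /* Illustrative SPIR-V word layout (an example, not taken from this file):
    * for "OpImageSampleImplicitLod %type %result %sampledImage %coord Bias %b"
    * the words are w[1] = result type, w[2] = result id, w[3] = sampled image,
    * w[4] = coordinate, w[5] = image-operands mask, w[6] = bias value.  That is
    * why idx starts at 4 and must land exactly on count once every operand has
    * been consumed.
    */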
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      vtn_fail("Invalid base type for sampler result");
   }
   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));
   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      vtn_assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }
}
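
/* A note on the sources filled in above: SPIR-V encodes OpAtomicIIncrement
 * and OpAtomicIDecrement without a value operand, so they are lowered to
 * adds of +1/-1, and OpAtomicISub becomes an add of the negated operand.
 * For OpAtomicCompareExchange, SPIR-V passes Value (w[7]) before Comparator
 * (w[8]) while the NIR comp-swap intrinsics take the compare value first,
 * hence the swapped order.
 */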
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
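
/* For example (illustrative values, not from a real shader): a 2D image
 * coordinate has dim == 2, so the swizzle above becomes {0, 1, 1, 1} and the
 * two real components are padded out to the four components that the
 * image_load_store intrinsics expect, repeating the last real component.
 */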
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         vtn_assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      vtn_fail("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_shared;
   case SpvOpAtomicStore:     return nir_intrinsic_store_shared;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid shared atomic");
   }
}
static nir_intrinsic_op
get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      vtn_fail("Invalid shared atomic");
   }
}
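
/* The three helpers above differ only in which intrinsic family they return:
 * the SSBO atomics take a block index plus an offset, the shared atomics take
 * an offset, and the *_var variants operate on a variable deref.
 * vtn_handle_ssbo_or_shared_atomic() below picks the deref-based path for
 * workgroup storage unless the driver asked for workgroup access to be
 * lowered to offsets.
 */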
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      vtn_fail("Invalid SPIR-V atomic");
   }

   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];

   if (ptr->mode == vtn_variable_mode_workgroup &&
       !b->options->lower_workgroup_access_to_offsets) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   } else {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op;
      if (ptr->mode == vtn_variable_mode_ssbo) {
         op = get_ssbo_nir_atomic_op(b, opcode);
      } else {
         vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
                    b->options->lower_workgroup_access_to_offsets);
         op = get_shared_nir_atomic_op(b, opcode);
      }

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: vtn_fail("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
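
/* A single-component "vector" has no vecN opcode, so it is emitted as an
 * fmov above.  As an illustrative call (not from this file),
 * create_vec(b, 3, 32) yields a nir_op_vec3 ALU instruction with a 32-bit,
 * 3-component SSA destination and write_mask 0x7; the caller still has to
 * fill in the sources and insert it with nir_builder_instr_insert().
 */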
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}

nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}

nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
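
/* OpVectorShuffle indexes the concatenation of the two source vectors, so a
 * literal index below src0->num_components selects from src0 and anything
 * larger selects the corresponding component of src1.  The literal
 * 0xffffffff is how SPIR-V spells an undefined component, which is why it
 * turns into an SSA undef above.
 */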
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      vtn_fail("unknown composite operation");
   }
}
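
/* A note on operand order above: for OpCompositeInsert the SPIR-V words are
 * w[3] = the object being inserted and w[4] = the composite it is inserted
 * into, which is why vtn_composite_insert() is called with w[4] (the
 * composite) first and w[3] (the value to insert) second.
 */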
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}

static void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   if ((semantics & all_memory_semantics) == all_memory_semantics) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      return;
   }

   /* Issue a bunch of more specific barriers */
   uint32_t bits = semantics;
   while (bits) {
      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
      switch (semantic) {
      case SpvMemorySemanticsUniformMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
         break;
      case SpvMemorySemanticsWorkgroupMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
         break;
      case SpvMemorySemanticsAtomicCounterMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
         break;
      case SpvMemorySemanticsImageMemoryMask:
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
         break;
      default:
         break;
      }
   }
}
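
/* In other words: when every memory-semantics bit we care about is set, a
 * single full nir_intrinsic_memory_barrier is emitted; otherwise we walk the
 * set bits and emit one targeted barrier (buffer, shared, atomic counter or
 * image) per bit.  Workgroup scope collapses to group_memory_barrier and
 * subgroup scope is treated as a no-op above.
 */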
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive:
         nir_intrinsic_set_stream_id(intrin, w[1]);
         break;
      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
      SpvMemorySemanticsMask semantics =
         vtn_constant_value(b, w[2])->values[0].u32[0];
      vtn_emit_memory_barrier(b, scope, semantics);
      break;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope =
         vtn_constant_value(b, w[1])->values[0].u32[0];
      if (execution_scope == SpvScopeWorkgroup)
         vtn_emit_barrier(b, nir_intrinsic_barrier);

      SpvScope memory_scope =
         vtn_constant_value(b, w[2])->values[0].u32[0];
      SpvMemorySemanticsMask memory_semantics =
         vtn_constant_value(b, w[3])->values[0].u32[0];
      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type");
   }
}

static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode");
   }
}
static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      vtn_fail("Unsupported execution model");
   }
}

#define spv_check_supported(name, cap) do {              \
      if (!(b->options && b->options->caps.name))        \
         vtn_warn("Unsupported SPIR-V capability: %s",   \
                  spirv_capability_to_string(cap));      \
   } while(0)
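
/* Note that spv_check_supported() only warns when a declared capability is
 * not enabled in the driver's capability bits; translation still proceeds,
 * so a module that declares an unsupported capability but never actually
 * uses it can still be turned into NIR.
 */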
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      default:
         vtn_fail("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      vtn_assert(w[1] == SpvAddressingModelLogical);
      vtn_assert(w[2] == SpvMemoryModelSimple ||
                 w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(b, w[1]) != b->entry_point_stage)
         break;

      vtn_assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      vtn_fail("Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break;

   default:
      vtn_fail("Unhandled execution mode");
   }
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect: {
      /* Handle OpSelect up-front here because it needs to be able to handle
       * pointers and not just regular vectors and scalars.
       */
      struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
      struct vtn_value *sel_val = vtn_untyped_value(b, w[3]);
      struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
      struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

      const struct glsl_type *sel_type;
      switch (res_val->type->base_type) {
      case vtn_base_type_scalar:
         sel_type = glsl_bool_type();
         break;
      case vtn_base_type_vector:
         sel_type = glsl_vector_type(GLSL_TYPE_BOOL, res_val->type->length);
         break;
      case vtn_base_type_pointer:
         /* We need to have actual storage for pointer types */
         vtn_fail_if(res_val->type->type == NULL,
                     "Invalid pointer result type for OpSelect");
         sel_type = glsl_bool_type();
         break;
      default:
         vtn_fail("Result type of OpSelect must be a scalar, vector, or pointer");
      }

      if (unlikely(sel_val->type->type != sel_type)) {
         if (sel_val->type->type == glsl_bool_type()) {
            /* This case is illegal but some older versions of GLSLang produce
             * it.  The GLSLang issue was fixed on March 30, 2017:
             *
             *    https://github.com/KhronosGroup/glslang/issues/809
             *
             * Unfortunately, there are applications in the wild which are
             * shipping with this bug so it isn't nice to fail on them so we
             * throw a warning instead.  It's not actually a problem for us as
             * nir_builder will just splat the condition out which is most
             * likely what the client wanted anyway.
             */
            vtn_warn("Condition type of OpSelect must have the same number "
                     "of components as Result Type");
         } else {
            vtn_fail("Condition type of OpSelect must be a scalar or vector "
                     "of Boolean type. It must have the same number of "
                     "components as Result Type");
         }
      }

      vtn_fail_if(obj1_val->type != res_val->type ||
                  obj2_val->type != res_val->type,
                  "Object types must match the result type in OpSelect");

      struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
      ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
                                   vtn_ssa_value(b, w[4])->def,
                                   vtn_ssa_value(b, w[5])->def);
      vtn_push_ssa(b, w[2], res_type, ssa);
      break;
   }

   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpUnordered:
   case SpvOpVectorTimesScalar:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   default:
      vtn_fail("Unhandled opcode");
   }

   return true;
}
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->spirv = words;
   b->spirv_word_count = word_count;
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = options;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   vtn_assert(word_count > 5);

   vtn_assert(words[0] == SpvMagicNumber);
   vtn_assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   vtn_assert(words[4] == 0);

   words += 5;

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      foreach_list_typed(struct vtn_function, func, node, &b->functions) {
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                                     _mesa_key_pointer_equal);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);