/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
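
/* Helper that formats a warning with ralloc_vasprintf() and prints it to
 * stderr as "file:line WARNING: ...".  It is presumably wrapped by the
 * vtn_warn() macro declared in vtn_private.h, which supplies __FILE__ and
 * __LINE__.
 */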
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}
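
/* Builds a vtn_ssa_value filled with undefs for the given type: vectors and
 * scalars get a single nir_ssa_undef, while matrices, arrays and structs
 * recurse per element.
 */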
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
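
/* Turns a nir_constant into a vtn_ssa_value by emitting load_const
 * instructions at the top of the current impl and recursing for composite
 * types.  The constant is looked up in b->const_table first so a constant
 * that was already materialized can be reused.
 */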
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}
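
/* Returns the vtn_ssa_value for any value id, materializing undefs and
 * constants on demand and loading access chains (needed for function
 * parameters) through vtn_variable_load().
 */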
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_access_chain:
      /* This is needed for function parameters */
      return vtn_variable_load(b, val->access_chain);

   default:
      unreachable("Invalid type for an SSA value");
   }
}
const char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));

   /* Amount of space taken by the string (including the null) */
   unsigned len = strlen(dup) + 1;
   *words_used = DIV_ROUND_UP(len, sizeof(*words));

   return dup;
}
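
/* Walks the SPIR-V word stream from start to end, decoding the opcode and
 * word count of each instruction and handing it to the handler.  Iteration
 * stops early if the handler returns false.
 */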
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return end;
}
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
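
/* Recursive worker for vtn_foreach_decoration().  parent_member tracks the
 * struct member that an OpMemberDecorate applies to (-1 at the top level)
 * and is carried along when recursing into decoration groups.
 */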
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
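
/* Context passed to struct_member_decoration_cb() while building a struct
 * type: the glsl_struct_field array being filled in and the vtn_type that
 * owns it.
 */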
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   dest->builtin = src->builtin;

   if (!glsl_type_is_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_ARRAY:
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         dest->array_element = src->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
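
/* Maps a SPIR-V image format onto the corresponding GL internal-format enum
 * value, which is what gets stored in val->type->image_format below.
 */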
static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      assert(!"Invalid image format");
      return 0;
   }
}
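
/* Handles all of the OpType* instructions, building both the glsl_type and
 * the accompanying vtn_type bookkeeping (strides, row-major flags, member
 * offsets, image format, etc.) and then running type decorations.
 */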
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      const bool signedness = w[3];
      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);

      /* Vectors implicitly have sizeof(base_type) stride.  For now, this
       * is always 4 bytes.  This will have to change if we want to start
       * supporting doubles or half-floats.
       */
      val->type->stride = 4;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      unsigned length;
      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         length = 0;
      } else {
         length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->type = glsl_array_type(array_element->type, length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
      case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
      case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
      case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
      case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
      case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            assert(!"Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert((dim == GLSL_SAMPLER_DIM_SUBPASS ||
                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS) || format);
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         assert(!"We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}
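
/* Decoration callback for specialization constants: when a SpecId decoration
 * matches one of the specializations supplied by the driver, the 32- or
 * 64-bit default value passed in through the spec_constant_value context is
 * replaced with the specialized one.
 */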
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);

   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }

   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
         unsigned len0 = glsl_get_vector_elements(v0->const_type);
         unsigned len1 = glsl_get_vector_elements(v1->const_type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         assert(bit_size == glsl_get_bit_size(v0->const_type) &&
                bit_size == glsl_get_bit_size(v1->const_type));

         if (bit_size == 64) {
            uint64_t u64[8];
            for (unsigned i = 0; i < len0; i++)
               u64[i] = v0->constant->values[0].u64[i];
            for (unsigned i = 0; i < len1; i++)
               u64[len0 + i] = v1->constant->values[0].u64[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            for (unsigned i = 0; i < len0; i++)
               u32[i] = v0->constant->values[0].u32[i];

            for (unsigned i = 0; i < len1; i++)
               u32[len0 + i] = v1->constant->values[0].u32[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      assert(!"OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
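
/* OpFunctionCall: pointer arguments are passed through as variable derefs,
 * SSA arguments are spilled to local temporaries, and a non-void return
 * value is read back out of a temporary return deref after the call.
 */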
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_access_chain) {
         nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
      retval->ssa = vtn_local_load(b, call->return_deref);
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
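
/* Handles OpSampledImage/OpImage plus all of the sampling, fetch, gather and
 * query opcodes, translating them into a nir_tex_instr.  The ConstOffsets
 * operand is lowered to four tg4 instructions whose .w results are
 * recombined into a single vec4.
 */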
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->access_chain = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_access_chain);
         val->access_chain = src_val->access_chain;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_access_chain);
      sampled.image = NULL;
      sampled.sampler = sampled_val->access_chain;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(coord);
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;     break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;       break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;      break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;      break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
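
/* Storage-image opcodes (loads, stores, queries and image atomics) are
 * translated to nir_intrinsic_image_* intrinsics: coordinates are widened to
 * four components and results are narrowed back down to the destination
 * type with a swizzle.
 */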
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[2]);

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      /* The image intrinsics always return 4 channels but we may not want
       * that many.  Emit a mov to trim it down.
       */
      unsigned swiz[4] = {0, 1, 2, 3};
      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
                                  glsl_get_vector_elements(type->type), false);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
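
/* Atomics on SSBO and workgroup variables share a single handler.  Workgroup
 * (shared) variables keep their variable dereference and use the var_* atomic
 * intrinsics, while SSBO pointers are first lowered to a block index plus a
 * byte offset and use the ssbo_* intrinsics.  For everything except
 * OpAtomicStore the result is pushed as the SSA value for w[2].
 */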
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_access_chain *chain;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      chain =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      break;

   case SpvOpAtomicStore:
      chain =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /* The memory scope and semantics operands are currently unused:
    * SpvScope scope = w[4];
    * SpvMemorySemanticsMask semantics = w[5];
    */

   if (chain->var->mode == vtn_variable_mode_workgroup) {
      struct vtn_type *type = chain->var->type;
      nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(chain->var->mode == vtn_variable_mode_ssbo);
      struct vtn_type *type;
      nir_ssa_def *offset, *index;
      offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
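
/* Small helper that builds a nir vecN ALU instruction with an SSA destination
 * and a full write mask.  A single-component "vector" is emitted as an fmov
 * since there is no vec1 opcode here.
 */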
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
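
/* Matrix transpose: column i of the result is built by gathering component i
 * from every column of the source using single-component swizzles.  The new
 * value records the original in ->transposed, so transposing the transposed
 * value later returns the original without emitting any more instructions.
 */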
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
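
/* When the extract/insert index is not a compile-time constant, the operation
 * is lowered to a chain of bcsel instructions: start with component 0 and,
 * for each remaining component i, select the result for i when the runtime
 * index compares equal to i.
 */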
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
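
/* OpVectorShuffle: each result component picks from src0 when its index is
 * below src0->num_components and from src1 otherwise.  An index of 0xffffffff
 * marks an undefined component per the SPIR-V spec, so it becomes an SSA
 * undef of the appropriate bit size.
 */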
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
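
/* OpCompositeInsert is implemented as copy-then-patch: the aggregate is first
 * copied with vtn_composite_copy() and the element addressed by the index
 * chain is then replaced, leaving the original value untouched for any other
 * uses.
 */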
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
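
/* Dispatcher for the composite instructions.  It pushes an SSA value for the
 * result id in w[2] with the result type from w[1] and then fills it in per
 * opcode using the vector/composite helpers above.
 */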
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      assert(!"Invalid primitive type");
      return 4; /* GL_TRIANGLES */
   }
}
static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      assert(!"Invalid GS input mode");
      return 0;
   }
}
static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
#define spv_check_supported(name, cap) do {             \
      if (!(b->ext && b->ext->name))                    \
         vtn_warn("Unsupported SPIR-V capability: %s",  \
                  spirv_capability_to_string(cap));     \
   } while(0)
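
/* First pass over the module: handles the preamble instructions (capability
 * declarations, extension imports, the memory model, entry points, names and
 * decorations).  Returning false at the first non-preamble opcode tells
 * vtn_foreach_instruction() to stop so the next pass can take over.
 */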
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
      case SpvCapabilityStorageImageReadWithoutFormat:
      case SpvCapabilityStorageImageWriteWithoutFormat:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      default:
         unreachable("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
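
/* Decoration-iterator callback that applies a SPIR-V execution mode to the
 * entry point by filling in the corresponding nir shader_info fields
 * (fs/gs/tess/cs), asserting that each mode only shows up on a stage where it
 * makes sense.
 */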
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info->cs.local_size[0] = mode->literals[0];
      b->shader->info->cs.local_size[1] = mode->literals[1];
      b->shader->info->cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info->tess.tcs_vertices_out = mode->literals[0];
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info->gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info->tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info->gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Vulkan's notion of CCW seems to match the hardware backends,
       * but be the opposite of OpenGL.  Currently NIR follows GL semantics,
       * so we set it backwards here.
       */
      b->shader->info->tess.ccw = true;
      break;
   case SpvExecutionModeVertexOrderCcw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Backwards; see above */
      b->shader->info->tess.ccw = false;
      break;
   case SpvExecutionModePointMode:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      assert(!"Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      unreachable("Unhandled execution mode");
   }
}
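
/* Second instruction pass: only types, constants, and module-level variables
 * are expected here.  Anything from the preamble is rejected, and the first
 * function-related opcode makes the handler return false, ending this pass.
 */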
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      assert(!"Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_access_chain *image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      if (glsl_type_is_image(image->var->var->interface_type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpUnordered:
   case SpvOpVectorTimesScalar:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
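
/* Top-level entry point.  The SPIR-V module is consumed in passes: validate
 * the 5-word header, run the preamble handler, apply the entry point's
 * execution modes, process types/constants/variables, build the CFG, and
 * finally emit NIR for each function body.  The nir_function for the chosen
 * entry point is the result handed back to the caller.
 */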
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct nir_spirv_supported_extensions *ext,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 4 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->ext = ext;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info->gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);