/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_access_chain:
      /* This is needed for function parameters */
      return vtn_variable_load(b, val->access_chain);

   default:
      unreachable("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }

   return dup;
}
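/* A worked example of the packing this helper undoes: SPIR-V string
 * literals are nul-terminated UTF-8 packed little-endian into 32-bit
 * words, so "abc" occupies a single word as 'a','b','c','\0', giving
 * len = 4 and DIV_ROUND_UP(4, 4) = 1 word used, while "abcd" needs a
 * second word just for its terminating null, giving 2.
 */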
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   assert(w == end);
   return w;
}
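/* For reference, each SPIR-V instruction is a run of 32-bit words whose
 * first word packs the total word count in the high 16 bits and the opcode
 * in the low 16 bits, which is exactly what the masking and shifting above
 * pull apart.  For example, a three-word OpName starts with:
 *
 *    w[0] == (3 << SpvWordCountShift) | SpvOpName
 *
 * so the loop strides through the stream in `count`-word increments.
 */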
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
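/* A minimal sketch of a caller-side callback (hypothetical; the real
 * callbacks are defined further down in this file):
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;  // member is -1 unless dec targets a member
 *    }
 *
 *    unsigned num_decorations = 0;
 *    vtn_foreach_decoration(b, val, count_decorations_cb, &num_decorations);
 */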
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   if (src->is_builtin)
      dest->builtin = src->builtin;

   if (!glsl_type_is_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_ARRAY:
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         dest->array_element = src->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}
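/* This copy-on-write dance matters because a vtn_type may be shared
 * between several values; a decoration that changes the layout of one
 * struct member must not leak into the others.  Usage below boils down
 * to, for example:
 *
 *    mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
 */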
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      assert(!"Invalid image format");
      return 0;
   }
}
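/* The returned values are raw GL internalformat enums (0x8814 is
 * GL_RGBA32F, and so on); presumably NIR carries the image format in this
 * form rather than as a SPIR-V enum, which is why the translation happens
 * here rather than in the backend.
 */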
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);

      /* Vectors implicitly have sizeof(base_type) stride.  For now, this
       * is always 4 bytes.  This will have to change if we want to start
       * supporting doubles or half-floats.
       */
      val->type->stride = 4;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      unsigned length;
      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         length = 0;
      } else {
         length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->type = glsl_array_type(array_element->type, length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;

         /* FIXME: */
         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
      case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
      case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
      case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
      case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
      case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            assert(!"Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert(!is_shadow);
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         assert(!"We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}
static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
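/* Sketch of how specialization resolves, assuming the client passed
 * specializations into the builder: given
 * b->specializations = {{.id = 7, .data32 = 16}} and a SpecId=7 decoration
 * on val, get_specialization(b, val, default_val) returns 16; with no
 * matching id it returns default_val unchanged.
 */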
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }

   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         assert(v0->value_type == vtn_value_type_constant ||
                v0->value_type == vtn_value_type_undef);
         assert(v1->value_type == vtn_value_type_constant ||
                v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      assert(!"OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_access_chain) {
         nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
      retval->ssa = vtn_local_load(b, call->return_deref);
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->access_chain = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_access_chain);
         val->access_chain = src_val->access_chain;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_access_chain);
      sampled.image = NULL;
      sampled.sampler = sampled_val->access_chain;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
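/* For example, a vec2 coordinate (x, y) yields swizzle {0, 1, 1, 1} and
 * thus the vec4 (x, y, y, y); components past `dim` just repeat the last
 * real one so the intrinsic always sees a full 4-component coordinate.
 */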
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      /* The image intrinsics always return 4 channels but we may not want
       * that many.  Emit a mov to trim it down.
       */
      unsigned swiz[4] = {0, 1, 2, 3};
      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
                                  glsl_get_vector_elements(type->type), false);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
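/* Note: unlike the image table in vtn_handle_image(), this SSBO table
 * distinguishes signed and unsigned min/max (atomic_imin vs. atomic_umin),
 * presumably because buffer intrinsics have no image format from which to
 * infer the signedness of the comparison.
 */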
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_access_chain *chain;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      chain =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      break;

   case SpvOpAtomicStore:
      chain =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /* These operands are currently ignored:
    *
    *    SpvScope scope = w[4];
    *    SpvMemorySemanticsMask semantics = w[5];
    */

   if (chain->var->mode == vtn_variable_mode_workgroup) {
      struct vtn_type *type = chain->var->type;
      nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(chain->var->mode == vtn_variable_mode_ssbo);
      struct vtn_type *type;
      nir_ssa_def *offset, *index;
      offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
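/* Sketch of the word layout indexed above (per the SPIR-V spec, not spelled
 * out in this file): a value-returning atomic such as
 *
 *    %old = OpAtomicIAdd %u32 %ptr %scope %semantics %value
 *
 * has w[1] = result type, w[2] = result id, w[3] = pointer, w[4] = scope,
 * w[5] = semantics and w[6] = value.  OpAtomicStore returns nothing, so its
 * pointer sits at w[1] and its value at w[4], which is exactly the w[3]/w[1]
 * split in the first switch and the w[4] source used for the stores.
 */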
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
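/* Note: for the one-component case create_vec() uses nir_op_fmov rather
 * than a vecN op.  With no source modifiers applied, fmov is effectively a
 * bit-preserving move here, so scalars can flow through the same helper as
 * real vectors.
 */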
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
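/* Informal example: transposing a matrix with three columns of vec2,
 * iteration i above gathers component i of every source column, so row 0
 * becomes vec3(col0.x, col1.x, col2.x).  Setting dest->transposed = src also
 * lets a later transpose of the result be satisfied from the cache at the
 * top of the function instead of emitting more moves.
 */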
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
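/* Note: a constant-index extract is just a one-component swizzle (index 2
 * on a vec4 yields src.z), so no dedicated extract opcode is needed beyond
 * the move that nir_swizzle() emits.
 */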
nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
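/* Sketch of what the loop above builds for a vec4 source (informal):
 *
 *    dest = index == 3 ? src.w
 *         : index == 2 ? src.z
 *         : index == 1 ? src.y
 *         : src.x;
 *
 * An out-of-range index therefore selects component 0 rather than reading
 * out of bounds.
 */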
nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
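/* Note: 0xffffffff is the literal SPIR-V uses for an undefined shuffle
 * component, hence the explicit 1-component undef above.  Every other index
 * addresses the logical concatenation src0||src1, which is why indices that
 * reach src1 are rebased by src0->num_components.
 */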
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
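/* Informal example: constructing a vec4 from (vec2 a, float b, float c)
 * walks the sources in order and assigns components a.x, a.y, b, c; dest_idx
 * ends at 4, satisfying the component-count assertion above.
 */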
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index is the
       * component of the vector at which to insert the scalar.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the component to extract from the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
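/* Informal example: extracting with indices {0, 1, 0} from a value of type
 * struct { mat2 m; } walks elems[0] (the matrix), then elems[1] (column 1),
 * and the final index hits the vector case above and returns component 0 as
 * a scalar.
 */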
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
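/* Note on the operand order above (per the SPIR-V spec): for
 * OpCompositeInsert the Object being inserted is w[3] and the Composite is
 * w[4], so w[4] is passed as the source composite and w[3] as the inserted
 * value, with the literal indices starting at w[5].
 */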
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      assert(!"Invalid primitive type");
      return 4;
   }
}
static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      assert(!"Invalid GS input mode");
      return 0;
   }
}
static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
#define spv_check_supported(name, cap) do {             \
      if (!(b->ext && b->ext->name))                    \
         vtn_warn("Unsupported SPIR-V capability: %s",  \
                  spirv_capability_to_string(cap));     \
   } while(0)
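/* Sketch of how the macro above is used: spv_check_supported(float64, cap)
 * expands to a check of b->ext && b->ext->float64 followed by a vtn_warn()
 * if the driver did not advertise the feature.  Translation continues
 * either way, so an unsupported capability degrades to a warning rather
 * than a hard failure.
 */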
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      default:
         unreachable("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info->cs.local_size[0] = mode->literals[0];
      b->shader->info->cs.local_size[1] = mode->literals[1];
      b->shader->info->cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info->tess.tcs_vertices_out = mode->literals[0];
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info->gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info->tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info->gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Vulkan's notion of CCW seems to match the hardware backends,
       * but be the opposite of OpenGL.  Currently NIR follows GL semantics,
       * so we set it backwards here.
       */
      b->shader->info->tess.ccw = true;
      break;
   case SpvExecutionModeVertexOrderCcw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Backwards; see above */
      b->shader->info->tess.ccw = false;
      break;
   case SpvExecutionModePointMode:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info->tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      assert(!"Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      unreachable("Unhandled execution mode");
   }
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      assert(!"Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
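/* Note: returning false from a handler is how vtn_foreach_instruction()
 * detects the end of a section: the walk stops at the first instruction the
 * handler declines, and spirv_to_nir() below resumes from that word with the
 * next phase's handler.
 */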
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_access_chain *image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      if (glsl_type_is_image(image->var->var->interface_type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpSelect:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct nir_spirv_supported_extensions *ext,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->ext = ext;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info->gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);