/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};

void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}
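
/* Note: callers in this file go through a vtn_warn() wrapper rather than
 * calling _vtn_warn() directly.  A minimal sketch of what such a wrapper
 * presumably looks like (the actual definition lives in vtn_private.h):
 *
 *    #define vtn_warn(...) _vtn_warn(__FILE__, __LINE__, __VA_ARGS__)
 */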

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   default:
      unreachable("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }

   return dup;
}
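
/* Every SPIR-V instruction starts with a word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count (including that
 * word itself), which is what the loop below uses to walk the binary.
 * For example, the word 0x0004003d decodes as opcode 61 (OpLoad) spanning
 * 4 words.
 */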

const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return w;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
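
/* A hypothetical caller, just to illustrate the callback contract used by
 * vtn_foreach_decoration(): the callback receives member == -1 for
 * decorations on the value itself and a struct-member index otherwise.
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;
 *    }
 *
 *    unsigned count = 0;
 *    vtn_foreach_decoration(b, val, count_decorations_cb, &count);
 */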

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
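
/* Worked example (hypothetical module): "OpMemberDecorate %S 2 Offset 16"
 * reaches the SpvOpMemberDecorate arm above with target = %S.  The member
 * index 2 is folded into dec->scope as VTN_DEC_STRUCT_MEMBER0 + 2,
 * dec->decoration becomes SpvDecorationOffset, and the remaining literal
 * {16} is what dec->literals points at.
 */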

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      assert(type->base_type == vtn_base_type_matrix ||
             type->base_type == vtn_base_type_array ||
             type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      assert(!"Invalid image format");
      return 0;
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->type = NULL;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            assert(!"Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         assert(!"We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}

static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
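
/* Example of how specialization resolves (the values are made up): if the
 * module contains
 *
 *    OpDecorate %n SpecId 7
 *    %n = OpSpecConstant %uint 4
 *
 * and the caller supplied a specialization entry with id == 7 and
 * data32 == 64, get_specialization() returns 64; with no matching entry the
 * module's default (4) is kept.
 */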

static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
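
/* The callback above handles the case where the compute workgroup size is
 * given as a (possibly specialized) constant, e.g. a uvec3 decorated
 * "OpDecorate %wgs BuiltIn WorkgroupSize" (the %wgs id is illustrative);
 * its three components become shader->info.cs.local_size.
 */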

static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }
   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         assert(v0->value_type == vtn_value_type_constant ||
                v0->value_type == vtn_value_type_undef);
         assert(v1->value_type == vtn_value_type_constant ||
                v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      assert(!"OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
      retval->ssa = vtn_local_load(b, call->return_deref);
   }
}

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;     break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;       break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;      break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;      break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
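
/* E.g. a 2-component coordinate (x, y) comes out of the swizzle above as
 * (x, y, y, y): components past the source size repeat the last valid one,
 * which satisfies the 4-component expectation of the image intrinsics.
 */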
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
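/* Maps SPIR-V atomic opcodes onto the SSBO atomic intrinsics (plus
 * load_ssbo/store_ssbo for OpAtomicLoad/Store).  IIncrement, IDecrement and
 * ISub all map to atomic_add; the +1/-1/negated operand is expected to be
 * supplied when the common atomic sources are filled in.
 */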
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:      return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
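/* Same mapping as above, but for workgroup (shared) memory, which uses the
 * deref-based nir_intrinsic_var_atomic_* intrinsics.
 */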
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_var;
   case SpvOpAtomicStore:      return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
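/* Lowers SPIR-V atomics on SSBO or workgroup memory.  Workgroup pointers
 * become variable/deref atomics; SSBO pointers are first converted to a
 * block index plus byte offset.  The scope and memory-semantics operands
 * are currently ignored.
 */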
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /* Scope and memory semantics are currently ignored:
    *
    *    SpvScope scope = w[4];
    *    SpvMemorySemanticsMask semantics = w[5];
    */

   if (ptr->mode == vtn_variable_mode_workgroup) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(ptr->mode == vtn_variable_mode_ssbo);
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
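/* Builds an fmov/vec2/vec3/vec4 ALU instruction with an SSA destination of
 * the requested width; callers fill in the sources and insert it.
 */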
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
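/* Transposes a matrix value column by column: column i of the result is
 * assembled from component i of every source column.  The result keeps a
 * link back to the source so a second transpose returns the original.
 */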
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
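/* Implements OpVectorShuffle: each destination component is selected from
 * src0 or src1 by index, with 0xffffffff producing an undefined component.
 */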
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  In that case, the last index is the
          * index of the component to extract from the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
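/* Dispatches the composite instructions (extract, insert, construct,
 * shuffle, copy).  Vectors are handled with swizzles and vec ALU
 * instructions; structs, arrays and matrices are handled structurally on
 * the vtn_ssa_value element tree.
 */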
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
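/* Translates vertex-stream and barrier instructions to the matching NIR
 * intrinsics.  Only the stream ID operand is consumed here; the scope and
 * semantics operands of OpControlBarrier/OpMemoryBarrier are not inspected.
 */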
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      assert(!"Invalid primitive type");
      return 4;
   }
}
static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      assert(!"Invalid GS input mode");
      return 0;
   }
}
static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
#define spv_check_supported(name, cap) do {                 \
      if (!(b->ext && b->ext->name))                        \
         vtn_warn("Unsupported SPIR-V capability: %s",      \
                  spirv_capability_to_string(cap));         \
   } while(0)
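/* First pass over the module: records capabilities, extensions, the entry
 * point, names and decorations.  Returns false at the first opcode that is
 * not part of the preamble so vtn_foreach_instruction stops there.
 */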
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      default:
         unreachable("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
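/* Decoration-walk callback that applies the entry point's execution modes
 * to the stage-specific nir_shader info: fragment origin and depth layout,
 * geometry primitives and invocations, tessellation parameters, and the
 * compute local size.
 */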
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Vulkan's notion of CCW seems to match the hardware backends,
       * but be the opposite of OpenGL.  Currently NIR follows GL semantics,
       * so we set it backwards here.
       */
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModeVertexOrderCcw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Backwards; see above */
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModePointMode:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      assert(!"Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break;

   default:
      unreachable("Unhandled execution mode");
   }
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      assert(!"Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
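/* Per-instruction callback used while emitting function bodies.  Most
 * opcodes go straight to the dedicated handlers above; the query-size and
 * atomic opcodes first inspect their pointer operand to choose between the
 * image path and the texture/buffer path.
 */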
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpVectorTimesScalar:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
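/* Public entry point: validates the SPIR-V header, runs the preamble,
 * type/constant/variable and CFG passes, then emits NIR for each function.
 * The nir_function corresponding to the requested entry point is looked up
 * at the end.
 */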
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct nir_spirv_supported_extensions *ext,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->ext = ext;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);