/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}
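/* Builds a vtn_ssa_value tree of NIR undef values that mirrors the shape of
 * the given GLSL type: scalars and vectors get a single nir_ssa_undef,
 * matrices, arrays and structs are filled in recursively, element by element.
 */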
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
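/* Turns a nir_constant into a vtn_ssa_value by emitting load_const
 * instructions at the top of the current NIR function.  Previously-converted
 * constants are looked up in b->const_table so they are not re-materialized.
 */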
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}
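/* Returns the vtn_ssa_value for any SPIR-V value id, materializing undefs,
 * constants and pointer values on demand.
 */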
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      unreachable("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
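/* Walks a range of SPIR-V words, decoding the opcode and word count of each
 * instruction and handing it to the given handler until the handler returns
 * false or the end of the range is reached.
 */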
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return end;
}
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         unreachable("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
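/* Recursive worker for vtn_foreach_decoration: walks a value's decoration
 * list, resolving struct-member scopes and descending into decoration groups.
 */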
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value. If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
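/* Records OpDecorate/OpMemberDecorate/OpExecutionMode and their group
 * variants by prepending vtn_decoration entries onto the target value's
 * list; the decorations are only interpreted later via
 * vtn_foreach_decoration.
 */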
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}
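/* Decoration callback applied to each struct member while building an
 * OpTypeStruct; fills in glsl_struct_field data and per-member vtn_type
 * flags (built-ins, offsets, row-major matrices, ...).
 */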
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      assert(type->base_type == vtn_base_type_matrix ||
             type->base_type == vtn_base_type_array ||
             type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}
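/* Maps a SPIR-V image format onto the corresponding GL internal-format enum
 * value used by NIR/GLSL image types.
 */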
static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      unreachable("Invalid image format");
   }
}
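/* Handles all OpType* instructions, building both the vtn_type and the
 * corresponding glsl_type for scalars, vectors, matrices, arrays, structs,
 * functions, pointers, images and samplers.
 */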
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type,
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            unreachable("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         unreachable("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}
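/* Decoration callback that looks up a SpecId decoration in the list of
 * specialization constants provided by the API and, if found, overrides the
 * default value stored in the spec_constant_value it is given.
 */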
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
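/* Handles OpConstant*, OpSpecConstant* and OpSpecConstantOp, building
 * nir_constants (including constant folding for OpSpecConstantOp) and
 * updating the compute workgroup size when the constant is decorated as the
 * WorkgroupSize built-in.
 */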
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }
   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         assert(v0->value_type == vtn_value_type_constant ||
                v0->value_type == vtn_value_type_undef);
         assert(v1->value_type == vtn_value_type_constant ||
                v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               break;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               break;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               break;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      unreachable("OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
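/* Handles OpSampledImage/OpImage and all of the sampling, fetch, gather and
 * query opcodes by building a nir_tex_instr from the SPIR-V operands and
 * optional image operands.
 */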
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
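/* Handles storage-image opcodes (OpImageTexelPointer, OpImageRead/Write,
 * image atomics and OpImageQuerySize) by emitting the matching
 * nir_intrinsic_image_* intrinsic.
 */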
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      unsigned dest_components =
         nir_intrinsic_infos[intrin->intrinsic].dest_components;
      if (intrin->intrinsic == nir_intrinsic_image_size) {
         dest_components = intrin->num_components =
            glsl_get_vector_elements(type->type);
      }

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        dest_components, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = &intrin->dest.ssa;
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
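/* Note that IIncrement, IDecrement, and ISub all map to atomic_add; the
 * common source-filling helper earlier in this file supplies the constant 1,
 * the constant -1, or the negated operand for those cases respectively.
 */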
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
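/* Workgroup (shared) and SSBO pointers take different paths below: workgroup
 * storage is still variable-based, so its atomics become *_var intrinsics on
 * a dereference chain, while SSBO atomics are emitted in block-index plus
 * byte-offset form.
 */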
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /*
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (ptr->mode == vtn_variable_mode_workgroup) {
      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(ptr->mode == vtn_variable_mode_ssbo);
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
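/* The single-component case uses fmov because NIR has no one-component
 * vector-construction opcode; vec2/vec3/vec4 cover the rest.
 */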
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
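/* Column i of the transposed result is built by gathering component i from
 * every column of the source, so transposing a 4x4 matrix emits four vec4
 * instructions.
 */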
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
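/* The dynamic variants above and below expand a non-constant index into a
 * chain of selects.  For a 4-component vector the extract is roughly:
 *
 *    dest = src.x;
 *    dest = index == 1 ? src.y : dest;
 *    dest = index == 2 ? src.z : dest;
 *    dest = index == 3 ? src.w : dest;
 *
 * which avoids any indirect addressing on vectors.
 */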
nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
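/* OpVectorShuffle indexes the components of the two sources as if they were
 * concatenated; a literal index of 0xFFFFFFFF means the result component has
 * no source, which is why it turns into an SSA undef above.
 */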
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
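/* For OpCompositeInsert the object to insert is word 3 and the composite is
 * word 4, so the arguments to vtn_composite_insert above intentionally appear
 * swapped relative to the instruction's word order.
 */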
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
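/* The execution-scope and memory-semantics operands of OpControlBarrier and
 * OpMemoryBarrier are not inspected here; each barrier maps to a single NIR
 * intrinsic, with only the stream id carried through for the stream variants.
 */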
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      unreachable("Invalid primitive type");
   }
}
static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      unreachable("Invalid GS input mode");
   }
}
static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
#define spv_check_supported(name, cap) do {            \
      if (!(b->ext && b->ext->name))                    \
         vtn_warn("Unsupported SPIR-V capability: %s",  \
                  spirv_capability_to_string(cap));     \
   } while(0)
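/* spv_check_supported only warns when a capability is not advertised in the
 * caller-supplied nir_spirv_supported_extensions struct; translation still
 * proceeds rather than failing outright.
 */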
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         break;

      default:
         unreachable("Unhandled capability");
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
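/* vtn_foreach_instruction stops as soon as the handler returns false, so
 * hitting the first non-preamble opcode above hands the remaining words back
 * to the caller for the next pass.
 */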
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.local_size[0] = mode->literals[0];
      b->shader->info.cs.local_size[1] = mode->literals[1];
      b->shader->info.cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->literals[0];
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->literals[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
          b->shader->stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Vulkan's notion of CCW seems to match the hardware backends,
       * but be the opposite of OpenGL.  Currently NIR follows GL semantics,
       * so we set it backwards here.
       */
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModeVertexOrderCcw:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      /* Backwards; see above */
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModePointMode:
      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
             b->shader->stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      unreachable("Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */

   default:
      unreachable("Unhandled execution mode");
   }
}
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      unreachable("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (image->mode == vtn_variable_mode_image) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(image->mode == vtn_variable_mode_sampler);
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect: {
      /* Handle OpSelect up-front here because it needs to be able to handle
       * pointers and not just regular vectors and scalars.
       */
      struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
      ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
                           vtn_ssa_value(b, w[4])->def,
                           vtn_ssa_value(b, w[5])->def);
      vtn_push_ssa(b, w[2], res_type, ssa);
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
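/* Block terminators such as OpBranch and OpReturn are consumed by the CFG
 * pre-pass (presumably in vtn_cfg.c) and never reach this handler, which is
 * why it can return true unconditionally after the switch.
 */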
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct nir_spirv_supported_extensions *ext,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->ext = ext;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info.gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);