/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}

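/* Builds a vtn_ssa_value tree whose leaves are nir_ssa_undef defs,
 * mirroring the structure of the given GLSL type (matrix columns,
 * array elements, struct fields).
 */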
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

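/* Converts a nir_constant into a vtn_ssa_value.  Scalar, vector, and
 * matrix leaves become load_const instructions inserted at the start of
 * the current function body; arrays and structs recurse per element.
 * Previously converted constants are looked up in b->const_table.
 */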
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, 32);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, 32);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}

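/* Returns the SSA value for any value ID, materializing undefs and
 * constants on the fly and loading variables through their access chain
 * (the latter is needed for function parameters).
 */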
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_access_chain:
      /* This is needed for function parameters */
      return vtn_variable_load(b, val->access_chain);

   default:
      unreachable("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }

   return dup;
}

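/* Walks a range of SPIR-V words and calls the handler once per
 * instruction.  The first word of each instruction packs the opcode in
 * the low 16 bits and the total word count in the high 16 bits; OpLine
 * and OpNoLine are consumed here to track the current source location.
 * If the handler returns false, iteration stops and a pointer to the
 * offending instruction is returned.
 */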
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

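/* Decorations are stored as a linked list on each vtn_value.  The scope
 * field distinguishes plain decorations, per-member decorations (offset
 * by VTN_DEC_STRUCT_MEMBER0), and execution modes; decoration groups are
 * expanded recursively.
 */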
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

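/* Records OpDecorate/OpMemberDecorate/OpExecutionMode (and their group
 * forms) by prepending a vtn_decoration to the target value's list.  The
 * decorations are only interpreted later, by the *_decoration_cb
 * callbacks passed to vtn_foreach_decoration.
 */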
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   if (src->is_builtin)
      dest->builtin = src->builtin;

   if (!glsl_type_is_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_ARRAY:
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         dest->array_element = src->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      vtn_warn("Tessellation not yet supported");
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;
   }
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;
   }
}

static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      assert(!"Invalid image format");
      return 0;
   }
}

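/* Translates OpType* instructions into vtn_type/glsl_type pairs.  Note
 * that pointers and sampled images are currently aliased to the type
 * they reference rather than getting a distinct type of their own.
 */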
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      const bool signedness = w[3];
      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat:
      val->type->type = glsl_float_type();
      break;

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);

      /* Vectors implicitly have sizeof(base_type) stride. For now, this
       * is always 4 bytes. This will have to change if we want to start
       * supporting doubles or half-floats.
       */
      val->type->stride = 4;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      unsigned length;
      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         length = 0;
      } else {
         length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->type = glsl_array_type(array_element->type, length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;

         /* FIXME: */
         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
      case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
      case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
      case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
      case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
      case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         assert(dim == GLSL_SAMPLER_DIM_2D);
         dim = GLSL_SAMPLER_DIM_MS;
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
         assert(!is_shadow);
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         assert(!"We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
                             int member, const struct vtn_decoration *dec,
                             void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   uint32_t *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         *const_value = b->specializations[i].data;
         return;
      }
   }
}

static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   vtn_foreach_decoration(b, val, spec_constant_deocoration_cb, &const_value);
   return const_value;
}

static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
}

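/* Handles OpConstant*, OpSpecConstant*, and OpConstantNull.  Boolean
 * specialization constants and OpSpecConstantOp are resolved at parse
 * time: shuffles, extracts, and inserts are folded structurally, and the
 * remaining ALU opcodes are mapped through
 * vtn_nir_alu_op_for_spirv_opcode() and evaluated with
 * nir_eval_const_opcode().
 */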
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant:
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = w[3];
      break;
   case SpvOpSpecConstant:
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
         }
         break;

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
         unsigned len0 = glsl_get_vector_elements(v0->const_type);
         unsigned len1 = glsl_get_vector_elements(v1->const_type);

         uint32_t u[8];
         for (unsigned i = 0; i < len0; i++)
            u[i] = v0->constant->values[0].u32[i];
         for (unsigned i = 0; i < len1; i++)
            u[len0 + i] = v1->constant->values[0].u32[i];

         for (unsigned i = 0; i < count - 6; i++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               val->constant->values[0].u32[i] = 0xdeadbeef;
            } else {
               val->constant->values[0].u32[i] = u[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      assert(!"OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_access_chain) {
         nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
         call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
      retval->ssa = vtn_local_load(b, call->return_deref);
   }
}

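/* Allocates an empty vtn_ssa_value for the given type, recursively
 * creating one element per column, array element, or struct field.  The
 * caller is responsible for filling in the leaf defs.
 */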
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

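/* Translates the SPIR-V image sample/fetch/gather/query opcodes into a
 * nir_tex_instr.  The base texture operation is picked from the opcode,
 * then the coordinate, projector, comparator, gather component, and any
 * optional image operands (bias, LOD, gradients, offset, sample index)
 * are collected into the source array before the instruction is built.
 */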
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->access_chain = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_access_chain);
         val->access_chain = src_val->access_chain;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_access_chain);
      sampled.image = NULL;
      sampled.sampler = sampled_val->access_chain;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(coord);
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask)
         assert(!"Constant offsets to texture gather not yet implemented");

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;     break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;       break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;      break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;      break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
   if (sampled.image) {
      nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
   } else {
      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
   }

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = &instr->dest.ssa;

   nir_builder_instr_insert(&b->nb, &instr->instr);
}

static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}

static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}

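/* Handles storage-image loads, stores, queries, and atomics by emitting
 * nir_intrinsic_image_* intrinsics.  For everything except size queries,
 * src[0] is the coordinate padded to four components and src[1] the
 * sample index; src[2] onward carries the store data or atomic operands.
 * Results always come back as four components and are trimmed with a
 * swizzle.
 */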
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
   intrin->variables[0] =
      nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      /* The image intrinsics always return 4 channels but we may not want
       * that many.  Emit a mov to trim it down.
       */
      unsigned swiz[4] = {0, 1, 2, 3};
      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
                                  glsl_get_vector_elements(type->type), false);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}

static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}

static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}

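/* Shared (workgroup) variables use the deref-based nir_intrinsic_var_*
 * atomics, while SSBO atomics use the buffer-index/offset form computed
 * by vtn_access_chain_to_offset().  The scope and memory-semantics
 * operands are currently ignored.
 */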
static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_access_chain *chain;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      chain =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      break;

   case SpvOpAtomicStore:
      chain =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /*
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (chain->var->mode == vtn_variable_mode_workgroup) {
      struct vtn_type *type = chain->var->type;
      nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(chain->var->mode == vtn_variable_mode_ssbo);
      struct vtn_type *type;
      nir_ssa_def *offset, *index;
      offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}

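/* Creates a vecN ALU instruction (fmov for a single component) with the
 * destination initialized and the write mask covering every component.
 * Callers fill in the sources before inserting it.
 */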
static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}

struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
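
/* Extracts a single, statically-indexed component from a vector as a
 * scalar SSA value.
 */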
static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}

static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
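
/* Extracts the component selected by a run-time index by lowering the
 * selection to a chain of bcsel instructions, one per possible component.
 */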
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}

static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
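
/* Implements OpVectorShuffle: every result component is taken from one of
 * the two concatenated sources according to indices[].  An index of
 * 0xffffffff marks a component whose value does not matter, so an undef is
 * used as its source.
 */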
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
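
/* Recursively copies a vtn_ssa_value.  Vectors and scalars keep sharing the
 * same nir_ssa_def; aggregates get fresh element arrays, which lets
 * vtn_composite_insert() below replace one element without touching the
 * value it copied from.
 */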
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
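
/* Implements OpCompositeInsert: copies src and replaces the element (or,
 * at component granularity, the vector component) addressed by the index
 * chain with the insert value.
 */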
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
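
/* Implements OpCompositeExtract: walks the index chain down into the
 * composite, possibly finishing with a per-component extract from a
 * vector.
 */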
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity. The last index will be the index of the
          * vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
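
/* Dispatches the SPIR-V composite instructions (vector extract/insert,
 * shuffle, construct, composite extract/insert, and object copy) to the
 * helpers above.
 */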
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
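
/* Translates the vertex-stream and barrier instructions into the
 * corresponding NIR intrinsics.
 */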
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
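
/* Maps a SPIR-V input/output execution mode to the raw GL primitive enum
 * stored in shader_info; the matching GL name is noted on each case.
 */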
static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      assert(!"Invalid primitive type");
      return 4; /* GL_TRIANGLES */
   }
}

static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      assert(!"Invalid GS input mode");
      return 0;
   }
}

static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
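
/* Handles one instruction of the SPIR-V preamble: capabilities, extensions,
 * the memory model, entry points, names, and decorations.  Returning false
 * on the first non-preamble opcode stops vtn_foreach_instruction().
 */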
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityFloat64:
      case SpvCapabilityInt64:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityImageMSArray:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityTransformFeedback:
      case SpvCapabilityStorageImageReadWithoutFormat:
      case SpvCapabilityStorageImageWriteWithoutFormat:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
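
/* Applies a single OpExecutionMode of the entry point to the nir_shader's
 * info: origin, depth layout, compute local size, GS primitives, etc.
 */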
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info->cs.local_size[0] = mode->literals[0];
      b->shader->info->cs.local_size[1] = mode->literals[1];
      b->shader->info->cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.vertices_out = mode->literals[0];
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_GEOMETRY) {
         b->shader->info->gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(!"Tessellation shaders not yet supported");
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
   case SpvExecutionModeSpacingFractionalEven:
   case SpvExecutionModeSpacingFractionalOdd:
   case SpvExecutionModeVertexOrderCw:
   case SpvExecutionModeVertexOrderCcw:
   case SpvExecutionModePointMode:
      assert(!"TODO: Add tessellation metadata");
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      assert(!"Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */
   }
}
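
/* Handles the types/constants/variables section of the module.  Preamble
 * opcodes are invalid here; the first function-level opcode ends this pass
 * by returning false.
 */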
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      assert(!"Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
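
/* Per-opcode handler used while emitting a function body.  Structured
 * control flow is mostly absent here because it is consumed by the CFG
 * pre-pass; everything else is dispatched to the topic-specific handlers.
 */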
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_access_chain *image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      if (glsl_type_is_image(image->var->var->interface_type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpUnordered:
   case SpvOpVectorTimesScalar:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
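
/* Top-level entry point: checks the SPIR-V header, then walks the word
 * stream in phases (preamble, execution modes, types/constants/variables,
 * CFG construction, per-function body emission) and returns the NIR
 * function for the requested entry point.
 */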
nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info->gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);