/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
   char *formatted;
   va_list args;

   va_start(args, msg);
   formatted = ralloc_vasprintf(NULL, msg, args);
   va_end(args);

   fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);

   ralloc_free(formatted);
}
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}
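
/* Returns the vtn_ssa_value tree for the SPIR-V id <value_id>, materializing
 * it on demand for undef, constant, and variable (access chain) values.
 */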
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_access_chain:
      /* This is needed for function parameters */
      return vtn_variable_load(b, val->access_chain);

   default:
      unreachable("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }

   return dup;
}
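
/* Walks the SPIR-V instructions between <start> and <end> and calls <handler>
 * on each one.  The first word of every SPIR-V instruction packs the total
 * word count in its upper 16 bits and the opcode in its lower 16 bits, which
 * is why the loop below advances by <count> words each iteration.
 */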
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return w;
}
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
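
/* Recursive worker for vtn_foreach_decoration().  A decoration's scope tells
 * us whether it applies to the value itself (VTN_DEC_DECORATION), to one of
 * its struct members (VTN_DEC_STRUCT_MEMBER0 + member), or is an execution
 * mode (which is skipped here).  Decoration groups are flattened by recursing
 * into the group value.
 */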
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   if (src->is_builtin)
      dest->builtin = src->builtin;

   if (!glsl_type_is_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
      case GLSL_TYPE_ARRAY:
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         dest->array_element = src->array_element;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      vtn_warn("Tessellation not yet supported");
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;
   }
}
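
/* Maps a SPIR-V image format onto the corresponding GL internal-format
 * enum value, which is what vtn_type::image_format stores.
 */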
static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      assert(!"Invalid image format");
      return 0;
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      const bool signedness = w[3];
      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);

      /* Vectors implicitly have sizeof(base_type) stride.  For now, this
       * is always 4 bytes.  This will have to change if we want to start
       * supporting doubles or half-floats.
       */
      val->type->stride = 4;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      unsigned length;
      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         length = 0;
      } else {
         length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->type = glsl_array_type(array_element->type, length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;

         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         assert(dim == GLSL_SAMPLER_DIM_2D);
         dim = GLSL_SAMPLER_DIM_MS;
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         assert(!"We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   uint32_t *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         *const_value = b->specializations[i].data;
         return;
      }
   }
}
static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
   return const_value;
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
}
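
/* Handles OpConstant*, OpSpecConstant*, and OpConstantNull.  Specialization
 * constants are resolved immediately via get_specialization(), so by the
 * time the nir_constant is built the final (possibly specialized) value is
 * already known.
 */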
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }
   case SpvOpSpecConstant:
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         break;
      }

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
         unsigned len0 = glsl_get_vector_elements(v0->const_type);
         unsigned len1 = glsl_get_vector_elements(v1->const_type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         assert(bit_size == glsl_get_bit_size(v0->const_type) &&
                bit_size == glsl_get_bit_size(v1->const_type));

         if (bit_size == 64) {
            uint64_t u64[8];
            for (unsigned i = 0; i < len0; i++)
               u64[i] = v0->constant->values[0].u64[i];
            for (unsigned i = 0; i < len1; i++)
               u64[len0 + i] = v1->constant->values[0].u64[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            for (unsigned i = 0; i < len0; i++)
               u32[i] = v0->constant->values[0].u32[i];

            for (unsigned i = 0; i < len1; i++)
               u32[len0 + i] = v1->constant->values[0].u32[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               break;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               break;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               break;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      assert(!"OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct nir_function *callee =
      vtn_value(b, w[3], vtn_value_type_function)->func->impl->function;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_access_chain) {
         nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      struct vtn_value *retval = vtn_push_value(b, w[2], vtn_value_type_ssa);
      retval->ssa = vtn_local_load(b, call->return_deref);
   }
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}
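
/* Handles the image-sampling opcodes.  The general flow is: pick a NIR texop
 * for the SPIR-V opcode, gather the coordinate/comparator/LOD/offset sources
 * into a nir_tex_src array, then build a nir_tex_instr.  OpImageGather with
 * ConstOffsets is special-cased by emitting four texture instructions (one
 * per offset) and combining their .w components into a vec4.
 */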
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->access_chain = src_val->sampled_image->image;
      } else {
         assert(src_val->value_type == vtn_value_type_access_chain);
         val->access_chain = src_val->access_chain;
      }
      return;
   }

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
   } else {
      assert(sampled_val->value_type == vtn_value_type_access_chain);
      sampled.image = NULL;
      sampled.sampler = sampled_val->access_chain;
   }

   const struct glsl_type *image_type;
   if (sampled.image) {
      image_type = sampled.image->var->var->interface_type;
   } else {
      image_type = sampled.sampler->var->var->interface_type;
   }
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      break;

   default:
      unreachable("Unhandled opcode");
   }

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         break;
      default:
         unreachable("Invalid sampler type");
      }

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(coord);
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         gather_offsets = vtn_ssa_value(b, w[idx++]);
         (*p++) = (nir_tex_src){};
      }

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }
   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   switch (glsl_get_sampler_result_type(image_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;   break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;     break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_uint;    break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;    break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
   nir_deref_var *texture;
   if (sampled.image) {
      nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
      texture = image;
   } else {
      texture = sampler;
   }

   instr->texture = nir_deref_var_clone(texture, instr);

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
      /* These operations require a sampler */
      instr->sampler = nir_deref_var_clone(sampler, instr);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_tg4:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
      /* These don't */
      instr->sampler = NULL;
      break;
   case nir_texop_txf_ms_mcs:
      unreachable("unexpected nir_texop_txf_ms_mcs");
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   assert(glsl_get_vector_elements(ret_type->type) ==
          nir_tex_instr_dest_size(instr));

   nir_ssa_def *def;
   nir_instr *instruction;
   if (gather_offsets) {
      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
      assert(glsl_get_length(gather_offsets->type) == 4);
      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};

      /* Copy the current instruction 4x */
      for (uint32_t i = 1; i < 4; i++) {
         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
         instrs[i]->op = instr->op;
         instrs[i]->coord_components = instr->coord_components;
         instrs[i]->sampler_dim = instr->sampler_dim;
         instrs[i]->is_array = instr->is_array;
         instrs[i]->is_shadow = instr->is_shadow;
         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
         instrs[i]->sampler = NULL;

         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));

         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
                           nir_tex_instr_dest_size(instr), 32, NULL);
      }

      /* Fill in the last argument with the offset from the passed in offsets
       * and insert the instruction into the stream.
       */
      for (uint32_t i = 0; i < 4; i++) {
         nir_tex_src src;
         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
         src.src_type = nir_tex_src_offset;
         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
      }

      /* Combine the results of the 4 instructions by taking their .w
       * components
       */
      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
      vec4->dest.write_mask = 0xf;
      for (uint32_t i = 0; i < 4; i++) {
         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
         vec4->src[i].swizzle[0] = 3;
      }
      def = &vec4->dest.dest.ssa;
      instruction = &vec4->instr;
   } else {
      def = &instr->dest.ssa;
      instruction = &instr->instr;
   }

   val->ssa = vtn_create_ssa_value(b, ret_type->type);
   val->ssa->def = def;

   nir_builder_instr_insert(&b->nb, instruction);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
      break;

   case SpvOpAtomicCompareExchange:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
      src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }
}
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   struct vtn_ssa_value *coord = vtn_ssa_value(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned dim = glsl_get_vector_elements(coord->type);
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, dim - 1);

   return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_ssa_value(b, w[5])->def;
      return;
   }

   struct vtn_image_pointer image;

   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpAtomicStore:
      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
      break;

   case SpvOpImageQuerySize:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = NULL;
      image.sample = NULL;
      break;

   case SpvOpImageRead:
      image.image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[4]);

      if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
         assert(w[5] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[6])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   case SpvOpImageWrite:
      image.image =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      image.coord = get_image_coord(b, w[2]);

      if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
         assert(w[4] == SpvImageOperandsSampleMask);
         image.sample = vtn_ssa_value(b, w[5])->def;
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }
      break;

   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
   OP(ImageQuerySize,         size)
   OP(ImageRead,              load)
   OP(ImageWrite,             store)
   OP(AtomicLoad,             load)
   OP(AtomicStore,            store)
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_min)
   OP(AtomicUMin,             atomic_min)
   OP(AtomicSMax,             atomic_max)
   OP(AtomicUMax,             atomic_max)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid image opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      unsigned swiz[4];
      for (unsigned i = 0; i < 4; i++)
         swiz[i] = i < image.coord->num_components ? i : 0;
      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                   swiz, 4, false));
      intrin->src[1] = nir_src_for_ssa(image.sample);
   }

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      break;
   case SpvOpAtomicStore:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
      break;
   case SpvOpImageWrite:
      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
      break;

   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
      break;

   default:
      unreachable("Invalid image opcode");
   }

   if (opcode != SpvOpImageWrite) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      /* The image intrinsics always return 4 channels but we may not want
       * that many.  Emit a mov to trim it down.
       */
      unsigned swiz[4] = {0, 1, 2, 3};
      val->ssa = vtn_create_ssa_value(b, type->type);
      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
                                  glsl_get_vector_elements(type->type), false);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }
}
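
/* SPIR-V atomics that have no direct NIR equivalent (IIncrement, IDecrement,
 * ISub) are lowered to atomic_add; fill_common_atomic_sources() supplies the
 * +1/-1 or negated operand that makes that work.
 */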
static nir_intrinsic_op
get_ssbo_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:     return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
get_shared_nir_atomic_op(SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
   OP(AtomicExchange,         atomic_exchange)
   OP(AtomicCompareExchange,  atomic_comp_swap)
   OP(AtomicIIncrement,       atomic_add)
   OP(AtomicIDecrement,       atomic_add)
   OP(AtomicIAdd,             atomic_add)
   OP(AtomicISub,             atomic_add)
   OP(AtomicSMin,             atomic_imin)
   OP(AtomicUMin,             atomic_umin)
   OP(AtomicSMax,             atomic_imax)
   OP(AtomicUMax,             atomic_umax)
   OP(AtomicAnd,              atomic_and)
   OP(AtomicOr,               atomic_or)
   OP(AtomicXor,              atomic_xor)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}

static void
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
{
   struct vtn_access_chain *chain;
   nir_intrinsic_instr *atomic;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
      chain =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      break;

   case SpvOpAtomicStore:
      chain =
         vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
      break;

   default:
      unreachable("Invalid SPIR-V atomic");
   }

   /*
   SpvScope scope = w[4];
   SpvMemorySemanticsMask semantics = w[5];
   */

   if (chain->var->mode == vtn_variable_mode_workgroup) {
      struct vtn_type *type = chain->var->type;
      nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
      nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->variables[0] = nir_deref_var_clone(deref, atomic);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else {
      assert(chain->var->mode == vtn_variable_mode_ssbo);
      struct vtn_type *type;
      nir_ssa_def *offset, *index;
      offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);

      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(type->type);
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         atomic->src[1] = nir_src_for_ssa(index);
         atomic->src[2] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         atomic->src[0] = nir_src_for_ssa(index);
         atomic->src[1] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   }

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->def = &atomic->dest.ssa;
      val->ssa->type = type->type;
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);
}
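
/* Creates (but does not insert) a vecN ALU instruction with its destination
 * initialized and the full write mask set.  A single component falls back to
 * fmov since NIR has no vec1 opcode here.
 */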

static nir_alu_instr *
create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
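
/* Transposes a matrix by gathering component i of every source column into
 * destination column i.  The result keeps a pointer back to the original in
 * "transposed", so transposing twice simply returns the value we started
 * with.
 */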

struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b->shader,
                                      glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }

      nir_builder_instr_insert(&b->nb, &vec->instr);

      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
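
/* Constant-index component extract/insert.  Extraction is a one-channel
 * swizzle; insertion builds a new vecN that takes the replaced channel from
 * "insert" and every other channel from the original vector.
 */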

nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}

nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components,
                                   src->bit_size);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
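
/* The dynamic-index variants cannot use a swizzle, so they build a chain of
 * bcsel instructions: start from element 0 and, for each i > 0, select the
 * candidate result when index == i.
 */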

nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}

nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
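
/* Implements OpVectorShuffle: each entry of "indices" picks a channel out of
 * the concatenation of src0 and src1, and the sentinel 0xffffffff produces an
 * undefined channel.  For example, with a vec3 src0 and a vec2 src1, indices
 * {0, 3, 4} yield (src0.x, src1.x, src1.y).
 */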

static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components,
                                   srcs[0]->bit_size);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
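
/* Makes a structural copy of a vtn_ssa_value.  Only the tree of vtn_ssa_value
 * wrappers is duplicated; the underlying nir_ssa_defs are shared, which is
 * safe because SSA definitions are immutable.
 */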

static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
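
/* Implements OpCompositeInsert by copying the source composite and replacing
 * the element named by the index chain.  For example, with indices {2, 1} on
 * an array of vec4s, the copy's element 2 is a vector, so component 1 of that
 * vector is replaced with the scalar being inserted.
 */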

static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index is the
       * component at which to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}

static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index is the component to
          * extract out of the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}

static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}

static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   nir_intrinsic_op intrinsic_op;
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
      intrinsic_op = nir_intrinsic_emit_vertex;
      break;
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive:
      intrinsic_op = nir_intrinsic_end_primitive;
      break;
   case SpvOpMemoryBarrier:
      intrinsic_op = nir_intrinsic_memory_barrier;
      break;
   case SpvOpControlBarrier:
      intrinsic_op = nir_intrinsic_barrier;
      break;
   default:
      unreachable("unknown barrier instruction");
   }

   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(b->shader, intrinsic_op);

   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
      nir_intrinsic_set_stream_id(intrin, w[1]);

   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
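
/* Translates a geometry-related execution mode into the GL primitive enum
 * value stored in shader_info.  The raw numeric values are used here (with
 * the corresponding GL name in a comment), presumably so that this file does
 * not have to pull in the GL headers.
 */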

static unsigned
gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      assert(!"Invalid primitive type");
      return 4;
   }
}

static unsigned
vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      assert(!"Invalid GS input mode");
      return 0;
   }
}

static gl_shader_stage
stage_for_execution_model(SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("Unsupported execution model");
   }
}
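
/* Checks whether the caller advertised support for an optional capability
 * through nir_spirv_supported_extensions.  Note that this only warns when the
 * capability is missing rather than failing the translation outright.
 */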

#define spv_check_supported(name, cap) do {             \
      if (!(b->ext && b->ext->name))                    \
         vtn_warn("Unsupported SPIR-V capability: %s",  \
                  spirv_capability_to_string(cap));     \
   } while(0)

static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
         break;

      case SpvCapabilityGeometryStreams:
      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
      case SpvCapabilityFloat64:
      case SpvCapabilityInt64:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
      case SpvCapabilityStorageImageMultisample:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilityInt8:
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
      case SpvCapabilityStorageImageReadWithoutFormat:
      case SpvCapabilityStorageImageWriteWithoutFormat:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityGroups:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint: {
      struct vtn_value *entry_point = &b->values[w[2]];
      /* Let this be a name label regardless of whether it is our entry point */
      unsigned name_words;
      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
          stage_for_execution_model(w[1]) != b->entry_point_stage)
         break;

      assert(b->entry_point == NULL);
      b->entry_point = entry_point;
      break;
   }

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, void *data)
{
   assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      b->origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.early_fragment_tests = true;
      break;

   case SpvExecutionModeInvocations:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      assert(b->shader->stage == MESA_SHADER_FRAGMENT);
      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      assert(b->shader->stage == MESA_SHADER_COMPUTE);
      b->shader->info->cs.local_size[0] = mode->literals[0];
      b->shader->info->cs.local_size[1] = mode->literals[1];
      b->shader->info->cs.local_size[2] = mode->literals[2];
      break;
   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.vertices_out = mode->literals[0];
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->stage == MESA_SHADER_GEOMETRY) {
         b->shader->info->gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
      } else {
         assert(!"Tessellation shaders not yet supported");
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
      b->shader->info->gs.output_primitive =
         gl_primitive_from_spv_execution_mode(mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
   case SpvExecutionModeSpacingFractionalEven:
   case SpvExecutionModeSpacingFractionalOdd:
   case SpvExecutionModeVertexOrderCw:
   case SpvExecutionModeVertexOrderCcw:
   case SpvExecutionModePointMode:
      assert(!"TODO: Add tessellation metadata");
      break;

   case SpvExecutionModePixelCenterInteger:
      b->pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      assert(!"Unhandled execution mode");
      break;

   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */
   }
}

static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      assert(!"Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_access_chain *image =
         vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
      if (glsl_type_is_image(image->var->var->interface_type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         assert(pointer->value_type == vtn_value_type_access_chain);
         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpSelect:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
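
/* Entry point of the translator.  The flow below is: validate the SPIR-V
 * header, run the preamble instructions (capabilities, entry point,
 * decorations), create the nir_shader, apply execution modes, process the
 * type/constant/variable instructions, build the CFG, and finally emit NIR
 * for each function body.
 *
 * A minimal usage sketch (illustrative only; the SPIR-V words, word count,
 * stage, entry point name, and compiler options are assumed to come from the
 * caller):
 *
 *    const uint32_t *spirv = ...;   // SPIR-V module as 32-bit words
 *    size_t word_count = ...;       // module size in words
 *    nir_function *entry =
 *       spirv_to_nir(spirv, word_count, NULL, 0, MESA_SHADER_FRAGMENT,
 *                    "main", NULL, nir_options);
 *    nir_shader *shader = entry->shader;
 */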

nir_function *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct nir_spirv_supported_extensions *ext,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] >= 0x10000);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->ext = ext;

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      assert(!"Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   b->shader = nir_shader_create(NULL, stage, options, NULL);

   /* Set shader info defaults */
   b->shader->info->gs.invocations = 1;

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   vtn_build_cfg(b, words, word_end);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = func->impl;
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

      vtn_function_emit(b, func, vtn_handle_body_instruction);
   }

   assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   assert(entry_point);

   ralloc_free(b);

   return entry_point;
}