/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   char filename[1024];
   static int idx = 0;

   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}

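/* Error-handling note: _vtn_fail() does not return.  After logging it
 * longjmp()s to b->fail_jump, which is presumably armed with setjmp() by
 * the top-level SPIR-V entrypoint before the walk begins, so a failure
 * anywhere in parsing unwinds in a single step.  Setting the
 * MESA_SPIRV_FAIL_DUMP_PATH environment variable additionally writes the
 * offending binary to <path>/fail-<N>.spirv via vtn_dump_shader() above.
 */
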
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         memcpy(load->value, constant->values,
                sizeof(nir_const_value) * load->def.num_components);

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
         const struct glsl_type *column_type = glsl_get_column_type(val->type);
         for (unsigned i = 0; i < columns; i++)
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                column_type);
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      vtn_fail("bad constant type");
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}

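/* Note on the dispatch above: undef and constant values are materialized
 * lazily, i.e. an SSA representation is only built when somebody asks for
 * one (with vtn_const_ssa_value() memoizing through b->const_table), plain
 * SSA values are returned as-is, and pointers are converted to an SSA form
 * on the fly.
 */
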
struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
      val->ssa = ssa;
   }

   return val;
}

nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}

struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}

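/* Worked example of the word count above: SPIR-V packs literal strings as
 * UTF-8 with a terminating NUL, padded to a 32-bit word boundary.  The
 * string "abc" takes len = 4 bytes and DIV_ROUND_UP(4, 4) = 1 word, while
 * "abcd" takes len = 5 bytes and DIV_ROUND_UP(5, 4) = 2 words (the second
 * word holding the NUL plus padding).
 */
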
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}

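/* Layout reminder for the loop above: every SPIR-V instruction begins with
 * one word whose low 16 bits are the opcode and whose high 16 bits are the
 * total word count.  For example, the word 0x00020011 decodes as opcode 17
 * (OpCapability) with count 2, i.e. exactly one operand word follows.
 */
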
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}

bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

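/* Illustrative usage (a hypothetical callback, not part of this file): a
 * consumer typically writes a small callback and threads its own context
 * through the data pointer, e.g.
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;
 *    }
 *
 *    unsigned n = 0;
 *    vtn_foreach_decoration(b, val, count_decorations_cb, &n);
 *
 * The member argument is -1 for decorations on the value itself and a
 * struct member index for OpMemberDecorate-style decorations.
 */
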
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

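/* Note that vtn_handle_decoration() only records decorations: each one is
 * prepended to the target value's singly-linked decoration list (so the
 * list ends up in reverse module order), carrying either a pointer to its
 * operand words in the instruction stream or a pointer to its decoration
 * group.  Nothing is interpreted here; the various *_decoration_cb passes
 * below consume the list later, once the decorated value actually exists.
 */
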
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}

struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;

   return type;
}

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

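/* The shallow copy above is the basis of a copy-on-write scheme: vtn_type
 * objects are shared freely between values, so before a decoration mutates
 * a member type (see mutable_matrix_member() and
 * vtn_handle_access_qualifier() below), the affected node is duplicated
 * first so the shared original stays untouched.
 */
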
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}

static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}

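/* Note on the row-major path above: for a row-major matrix, MatrixStride
 * describes the distance between rows rather than between the column
 * vectors vtn uses as the matrix's array elements, so the decoration's
 * stride is stored on the element type while the element's previously
 * computed stride is promoted to the matrix's own stride.
 */
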
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as the stream is filled up when
       * applying the decoration to a variable; just check that, when it is
       * not on a struct member, it is on a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}

static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                          glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       *       The "Depth" operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, false, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}

static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         return;
      }
   }
}

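/* How specialization works here: the caller pre-loads *data with the
 * default value encoded in the SPIR-V, and if the value carries a SpecId
 * decoration whose id matches one of the driver-supplied entries in
 * b->specializations, the callback overwrites it with the specialized
 * value.  With no matching entry, the default survives unchanged.
 */
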
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}

static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      bool bval = (opcode == SpvOpConstantTrue ||
                   opcode == SpvOpSpecConstantTrue);

      nir_const_value u32val = nir_const_value_for_uint(bval, 32);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);

      val->constant->values[0].b = u32val.u32 != 0;
      break;
   }

   case SpvOpConstant:
   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values[0].u32 = w[3];
         break;
      case 16:
         val->constant->values[0].u16 = w[3];
         break;
      case 8:
         val->constant->values[0].u8 = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
      }

      if (opcode == SpvOpSpecConstant)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
                                &val->constant->values[0]);
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val->constant);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;
            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

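/* Note on OpSpecConstantOp above: rather than emitting NIR instructions,
 * the handler folds the operation at parse time.  The generic path maps
 * the SPIR-V opcode to a NIR ALU op, gathers the (already constant)
 * sources, and lets nir_eval_const_opcode() compute the result, so a
 * specialized expression like "x + 1" becomes a plain constant before any
 * NIR is built.
 */
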
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}

static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement would be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */

   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with a matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with a matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
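/* Example of the split above (illustrative): an OpAtomicLoad with
 * Acquire | UniformMemory semantics yields *before = 0 and
 * *after = Acquire | UniformMemory, i.e. a single acquire barrier after the
 * load, while AcquireRelease | WorkgroupMemory on a read-modify-write yields
 * a Release | WorkgroupMemory barrier before and an Acquire | WorkgroupMemory
 * barrier after the operation.
 */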
static nir_memory_semantics
vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
                                       SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   return nir_semantics;
}
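/* For instance (illustrative), SequentiallyConsistent | MakeAvailable maps to
 * NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE | NIR_MEMORY_MAKE_AVAILABLE,
 * assuming the VulkanMemoryModel capability was declared; SeqCst has no
 * direct NIR equivalent and is conservatively treated as AcquireRelease.
 */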
static nir_variable_mode
vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
                                  SpvMemorySemanticsMask semantics)
{
   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   return modes;
}
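/* E.g. (illustrative, assuming the buffer-like modes listed above): a barrier
 * with UniformMemory | WorkgroupMemory semantics returns nir_var_uniform |
 * nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_mem_global |
 * nir_var_mem_shared, so the NIR scoped barrier covers both buffer-like and
 * shared memory.
 */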
static nir_scope
vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
{
   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   return nir_scope;
}
static void
vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
                                SpvScope mem_scope,
                                SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);

   /* Memory semantics is optional for OpControlBarrier. */
   nir_scope nir_mem_scope;
   if (nir_semantics == 0 || modes == 0)
      nir_mem_scope = NIR_SCOPE_NONE;
   else
      nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);

   nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
}
static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
}
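/* A SPIR-V OpMemoryBarrier with Workgroup scope and
 * AcquireRelease | WorkgroupMemory semantics thus becomes (illustrative):
 *
 *    nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, NIR_SCOPE_WORKGROUP,
 *                       NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,
 *                       nir_var_mem_shared);
 *
 * with no execution scope, since a memory barrier alone does not synchronize
 * control flow.
 */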
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
         case GLSL_TYPE_INTERFACE:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            vtn_fail("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
   src.src_type = type;
   return src;
}
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
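/* Worked example (illustrative): for an OpImageSampleExplicitLod with image
 * operands Lod | ConstOffset, w[mask_idx] is 0x0A.  Asking for ConstOffset
 * counts the lower set bits that take arguments (just Lod, one word) plus
 * one for the mask word itself, so the ConstOffset argument lives at
 * w[mask_idx + 2].  Grad is the only operand that consumes two words.
 */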
static void
non_uniform_decoration_cb(struct vtn_builder *b,
                          struct vtn_value *val, int member,
                          const struct vtn_decoration *dec, void *void_ctx)
{
   enum gl_access_qualifier *access = void_ctx;
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      *access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);

      /* It seems valid to use OpSampledImage with OpUndef instead of
       * OpTypeImage or OpTypeSampler.
       */
      if (vtn_untyped_value(b, w[3])->value_type == vtn_value_type_undef) {
         val->sampled_image->image = NULL;
      } else {
         val->sampled_image->image =
            vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      }

      if (vtn_untyped_value(b, w[4])->value_type == vtn_value_type_undef) {
         val->sampled_image->sampler = NULL;
      } else {
         val->sampled_image->sampler =
            vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      }
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         vtn_push_pointer(b, w[2], src_val->sampled_image->image);
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         vtn_push_pointer(b, w[2], src_val->pointer);
      }
      return;
   }

   struct vtn_type *ret_type = vtn_get_type(b, w[1]);

   struct vtn_pointer *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      image = sampled_val->sampled_image->image;
      sampler = sampled_val->sampled_image->sampler;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      image = sampled_val->pointer;
   }

   if (!image) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
      return;
   }

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
   nir_deref_instr *sampler_deref =
      sampler ? vtn_pointer_to_deref(b, sampler) : NULL;

   const struct glsl_type *image_type = sampled_val->type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image_deref->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't take a sampler */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_get_nir_ssa(b, w[idx++]);
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);

   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image_type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;    break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16;   break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32;   break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64;   break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
}
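/* End-to-end sketch (illustrative): a GLSL textureLod(sampler2D, vec2 uv,
 * float lod) arrives as OpImageSampleExplicitLod with a Lod image operand.
 * The code above selects nir_texop_txl, pushes texture and sampler deref
 * sources, a 2-component coordinate source, and an LOD source, then creates
 * a nir_tex_instr whose result is pushed as the SSA value for w[2].
 */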
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
      src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
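/* Note how NIR has no dedicated increment/decrement/subtract atomics:
 * OpAtomicIIncrement becomes an atomic add of the constant 1,
 * OpAtomicIDecrement an add of -1, and OpAtomicISub an add of the negated
 * operand.  Also note that SPIR-V orders the compare-exchange operands as
 * (value, comparator) in w[7]/w[8] while the NIR intrinsics expect
 * (comparator, value), hence the swapped indices above.
 */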
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, coord->num_components - 1);

   return nir_swizzle(&b->nb, coord, swizzle, 4);
}
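/* For a 2D image (illustrative), a vec2 coordinate (x, y) becomes
 * (x, y, y, y): swizzle[i] clamps to the last valid component, and the
 * extra components are simply ignored by the image intrinsics.
 */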
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_get_nir_ssa(b, w[5]);
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   struct vtn_value *res_val;
   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   case SpvOpImageQuerySize:
      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;

   case SpvOpImageRead: {
      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   case SpvOpImageWrite: {
      res_val = vtn_value(b, w[1], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
   nir_intrinsic_set_access(intrin, access);

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
         /* Only OpImageRead supports a lod parameter (via
          * SPV_AMD_shader_image_load_store_lod), but the current NIR
          * intrinsic definition for atomics requires us to set it for
          * OpAtomicLoad as well.
          */
         intrin->src[3] = nir_src_for_ssa(image.lod);
      }
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      nir_ssa_def *value = vtn_get_nir_ssa(b, value_id);
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
      /* Only OpImageWrite supports a lod parameter (via
       * SPV_AMD_shader_image_load_store_lod), but the current NIR
       * intrinsic definition for atomics requires us to set it for
       * OpAtomicStore as well.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      unsigned dest_components = glsl_get_vector_elements(type->type);
      if (nir_intrinsic_infos[op].dest_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        nir_intrinsic_dest_components(intrin), 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (nir_intrinsic_dest_components(intrin) != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      vtn_push_nir_ssa(b, w[2], result);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:   return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:  return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left out AtomicStore, AtomicSMin and AtomicSMax.  Right now there
       * are no NIR intrinsics for them.  At the moment, Atomic Counter
       * support is only needed for ARB_spirv, so we only need to support
       * GLSL Atomic Counters, which are uints and don't allow direct
       * storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}
static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:   return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:  return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}
/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_uniform) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset.  In this case we don't need
       * to, as that info is already stored on the ptr->var->var nir_variable
       * (see vtn_create_variable).
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here, as
          * atomic counter uniforms don't have sources.
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }

   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
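/* To summarize the three paths above (illustrative): an atomic on an
 * ARB_spirv atomic counter (uniform mode) becomes a
 * nir_intrinsic_atomic_counter_* on a deref; an SSBO atomic on an
 * index/offset-based pointer becomes nir_intrinsic_ssbo_* with explicit
 * block index and byte offset sources; everything else (e.g. Workgroup
 * storage) goes through the generic nir_intrinsic_deref_* path.
 */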
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
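/* Illustrative: transposing a mat2x3 (2 columns of vec3) builds 3 new vec2s,
 * where row i gathers component i of every source column.  The result also
 * caches src as its own transpose, so transposing twice is free.
 */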
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
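/* Illustrative: OpVectorShuffle of a vec2 src0 and vec3 src1 with indices
 * (1, 3, 0xffffffff) selects src0.y, then src1.y (indices past src0 continue
 * into src1), and fills the last component with an undef, which is what the
 * special 0xffffffff index means in SPIR-V.
 */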
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      /* If we got a vector here, that means the next index will be trying to
       * dereference a scalar.
       */
      vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
                  "OpCompositeInsert has too many indices.");
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");

      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
   } else {
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
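/* Illustrative: inserting a float into a struct { vec4 v; } at member 0,
 * component 2 uses indices (0, 2): the loop walks to the vec4, and the final
 * index hits the vector path, emitting nir_vector_insert_imm on component 2.
 * Only the copied wrapper values change; the original source composite is
 * left untouched.
 */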
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");

         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the vector to extract.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = nir_channel(&b->nb, cur->def, indices[i]);
         return ret;
      } else {
         vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_get_type(b, w[1]);
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]));
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                   vtn_get_nir_ssa(b, w[4]),
                                   vtn_get_nir_ssa(b, w[5]));
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]),
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;

      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;
   case SpvOpCopyObject:
      vtn_copy_value(b, w[3], w[2]);
      return;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa_value(b, w[2], ssa);
}
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->shader->options->use_scoped_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask |
      SpvMemorySemanticsOutputMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   /* Map the GLSL memoryBarrier() construct and any barriers with more than
    * one semantic to the corresponding NIR one.
    */
   if (util_bitcount(semantics & all_memory_semantics) > 1) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      if (semantics & SpvMemorySemanticsOutputMemoryMask) {
         /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't
          * include TCS outputs, so we have to emit its own intrinsic for
          * that.  We then need to emit another memory_barrier to prevent
          * moving non-output operations to before the tcs_patch barrier.
          */
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      }
      return;
   }

   /* Issue a more specific barrier */
   switch (semantics & all_memory_semantics) {
   case SpvMemorySemanticsUniformMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
      break;
   case SpvMemorySemanticsWorkgroupMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
      break;
   case SpvMemorySemanticsAtomicCounterMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
      break;
   case SpvMemorySemanticsImageMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
      break;
   case SpvMemorySemanticsOutputMemoryMask:
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
      break;
   default:
      break;
   }
}
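/* Illustrative mapping for drivers without scoped barriers: GLSL
 * memoryBarrierBuffer() arrives as UniformMemory-only semantics and becomes
 * nir_intrinsic_memory_barrier_buffer, while a full GLSL memoryBarrier()
 * sets several semantics bits at once and takes the generic
 * nir_intrinsic_memory_barrier path above.
 */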
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }

      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
       * memory semantics of None for GLSL barrier().
       * And before that, prior to c3f1cdfa, emitted the OpControlBarrier with
       * Device instead of Workgroup for execution scope.
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          (execution_scope == SpvScopeWorkgroup ||
           execution_scope == SpvScopeDevice) &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         execution_scope = SpvScopeWorkgroup;
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class: Writes to Output
       *    variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      if (b->shader->options->use_scoped_barrier) {
         vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
                                         memory_semantics);
      } else {
         vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

         if (execution_scope == SpvScopeWorkgroup)
            vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      }
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

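/*
 * The returned literals above are raw GL primitive enum values (GL_POINTS,
 * GL_LINES, and so on, as the comments note), presumably spelled
 * numerically so this file doesn't have to pull in GL headers.
 */
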
static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

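/*
 * These counts follow directly from the geometry-shader input primitive:
 * a point carries 1 vertex, a line 2, a line with adjacency 4, a triangle
 * 3, and a triangle with adjacency 6.
 */
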
static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}

#define spv_check_supported(name, cap) do {                     \
      if (!(b->options && b->options->caps.name))               \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)",     \
                  spirv_capability_to_string(cap), cap);        \
   } while(0)

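/*
 * Usage sketch: a capability case below such as
 *
 *    case SpvCapabilityFloat64:
 *       spv_check_supported(float64, cap);
 *       break;
 *
 * expands into a test of b->options->caps.float64 and, when the driver
 * doesn't advertise the capability, emits a warning rather than failing
 * the translation.
 */
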
static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}

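/*
 * For reference, the OpEntryPoint operand layout consumed above is:
 *    w[1]    = execution model (SpvExecutionModel*)
 *    w[2]    = <id> of the entry point's OpFunction
 *    w[3]... = null-terminated name literal, then the interface <id>s
 */
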
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;
   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;
      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;
      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      case SpvCapabilityImageGatherBiasLodAMD:
         spv_check_supported(amd_image_gather_bias_lod, cap);
         break;

      case SpvCapabilityAtomicFloat32AddEXT:
         spv_check_supported(float32_atomic_add, cap);
         break;

      case SpvCapabilityAtomicFloat64AddEXT:
         spv_check_supported(float64_atomic_add, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }
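
   /*
    * Note the asymmetry above: a capability this translator knows about but
    * the driver doesn't advertise only warns (via spv_check_supported),
    * while a capability missing from the switch entirely is a hard
    * vtn_fail().
    */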
   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_32bit_global;
         b->options->global_addr_format = nir_address_format_32bit_global;
         b->options->temp_addr_format = nir_address_format_32bit_global;
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_64bit_global;
         b->options->global_addr_format = nir_address_format_64bit_global;
         b->options->temp_addr_format = nir_address_format_64bit_global;
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      b->mem_model = w[2];
      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;
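
   /*
    * For the physical addressing models above, shared, global, and temp
    * storage all share one flat 32- or 64-bit global address format, which
    * matches the flat address space OpenCL-style kernels expect for raw
    * pointer arithmetic.
    */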
   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      } else {
         return false; /* End of preamble. */
      }
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}

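/*
 * The boolean returned above is what terminates the preamble walk:
 * vtn_foreach_instruction() keeps going while the handler returns true, so
 * the first opcode that isn't debug, capability, extension, entry-point, or
 * decoration data (or a NonSemantic OpExtInst) returns false and hands
 * control back to spirv_to_nir().
 */
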
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;
   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;
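
   /*
    * LocalSize vs. LocalSizeId above: the former encodes the workgroup size
    * as literal operands, while the latter's operands are constant <id>s
    * that must be resolved through vtn_constant_uint().
    */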
   case SpvExecutionModeLocalSizeHint:
   case SpvExecutionModeLocalSizeHintId:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ:
      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}

static void
vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                                           const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   unsigned execution_mode = 0;

   switch (mode->exec_mode) {
   case SpvExecutionModeDenormPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeDenormFlushToZero:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeSignedZeroInfNanPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTE:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTZ:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;

   default:
      break;
   }

   b->shader->info.float_controls_execution_mode |= execution_mode;
}

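/*
 * Example: "OpExecutionMode %main DenormFlushToZero 32" hits the
 * SpvExecutionModeDenormFlushToZero case with operands[0] == 32, picks
 * FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32, and ORs it into
 * float_controls_execution_mode, so modes declared for different bit
 * widths accumulate into a single bitmask.
 */
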
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}

static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}

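/*
 * nir_bcsel() only understands scalars and vectors, so for composite
 * results (structs, arrays, matrices) vtn_nir_select() recurses per
 * element with the same condition, emitting one bcsel per leaf value.
 */
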
static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   vtn_push_ssa_value(b, w[2],
                      vtn_nir_select(b, vtn_ssa_value(b, w[3]),
                                     vtn_ssa_value(b, w[4]),
                                     vtn_ssa_value(b, w[5])));
}

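/*
 * For reference, the OpSelect operands validated above are:
 *    w[1] = result type, w[2] = result <id>, w[3] = condition,
 *    w[4] = object chosen when true, w[5] = object chosen when false.
 */
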
static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
   struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte
       * offset).
       */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_get_nir_ssa(b, w[3]),
                                vtn_get_nir_ssa(b, w[4]),
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_get_nir_ssa(b, w[3]),
                               vtn_get_nir_ssa(b, w[4]),
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   vtn_push_nir_ssa(b, w[2], def);
}

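/*
 * Worked example for OpPtrDiff: with 4-byte int elements, two pointers 32
 * bytes apart give nir_build_addr_isub() == 32, and the nir_idiv() by
 * elem_size == 4 yields the element difference 8 that OpPtrDiff is defined
 * to return.
 */
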
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (glsl_type_is_image(image->type->type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image->type->type));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }
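
   /*
    * OpImageQuerySize can target either a storage image or a sampled image,
    * hence the glsl_type_is_image() dispatch above; the atomic opcodes
    * below need a similar dispatch, keyed on the operand's value type
    * instead.
    */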
   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }
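
   /*
    * Atomics note: OpAtomicStore has no result <id>, so its pointer operand
    * lives in w[1] rather than w[3]; that is why the two cases above peek
    * at different words before deciding between the image and pointer
    * paths.
    */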
   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
   case SpvOpPtrDiff:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
      break;
   }

   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_get_type(b, w[1]);
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      vtn_push_nir_ssa(b, w[2], result);
      break;
   }
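
   /*
    * OpReadClockKHR example: with a uint64_t result type, the uvec2
    * produced by nir_intrinsic_shader_clock is recombined by
    * nir_pack_64_2x32() above; with a uvec2 result type it is pushed as-is.
    */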
   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

struct vtn_builder*
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }

   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* The first GLSLang version bump actually came 1.5 years after #179 was
    * fixed, but this should at least let us shut the workaround off for
    * modern versions of GLSLang.
    */
   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;
 fail:
   ralloc_free(b);
   return NULL;
}

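/*
 * For reference, the five header words validated above are, per the SPIR-V
 * spec: the magic number, the version, the generator magic (vendor id in
 * the high 16 bits, tool version in the low 16), the <id> bound, and a
 * reserved word that must be 0.
 */
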
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_shader_in;
      in_var->data.read_only = true;
      in_var->data.location = i;

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}

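/*
 * The by-value path above exists because OpenCL kernel arguments in the
 * Function storage class arrive as pointers in SPIR-V: copying the pointee
 * into a fresh local gives the callee a Function-memory deref to operate
 * on, rather than a read-only shader input.
 */
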
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse rounding mode execution modes.  This has to happen earlier than
    * other changes in the execution modes since they can affect, for
    * example, the result of the floating point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant
    * initializers on outputs so nir_remove_dead_variables sees that
    * they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}