/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
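
/* Dumps the SPIR-V binary we are currently parsing to
 * <path>/<prefix>-<idx>.spirv so that a failing shader can be inspected or
 * replayed offline.
 */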
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
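
/* Builds a vtn_ssa_value tree of undefined values: vectors and scalars get a
 * single nir_ssa_undef, while arrays, matrices, and structs recurse into
 * their elements.
 */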
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
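
/* Materializes a nir_constant as a vtn_ssa_value by emitting load_const
 * instructions at the top of the current NIR function.  Results are cached
 * in b->const_table so each constant is only materialized once.
 */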
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer: {
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;
   }

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}
nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}
struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}
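
/* Copies a nul-terminated string literal out of the SPIR-V word stream and
 * optionally reports how many words it occupied.
 */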
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
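
/* Walks the instruction words between start and end, keeping the current
 * OpLine/OpNoLine debug location and b->spirv_offset up to date, and hands
 * every other instruction to the given handler.  Returns the instruction at
 * which the handler asked to stop, or NULL if the whole range was consumed.
 */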
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return NULL;
}
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
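
/* Recursive worker for vtn_foreach_decoration: resolves struct-member
 * scopes, skips execution modes, and expands decoration groups before
 * invoking the callback.
 */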
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
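
/* Returns a mutable copy of the matrix type of the given struct member,
 * copying every intervening array level so that the edit cannot leak into
 * other users of the shared vtn_type.
 */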
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
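
/* First decoration pass for OpTypeStruct members: fills in the
 * glsl_struct_field array and the per-member flags on the vtn_type.
 * MatrixStride is deliberately left for a second pass (see
 * struct_member_matrix_stride_cb) because it depends on RowMajor/ColMajor.
 */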
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl_type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled in when
       * applying the decoration to a variable; just check that, since it is
       * not on a struct member, it is applied to a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
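
/* Handles every OpType* instruction: builds the vtn_type, derives the
 * corresponding glsl_type, and applies any decorations via
 * type_decoration_cb.
 */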
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
      case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
      case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
      case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
      case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
      case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       *       The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, false, is_array,
                                             sampled_base_type);
      } else if (sampled == 2) {
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);
      val->type->type = val->type->image->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         break;
      }
   }
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}
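
/* Handles all OpConstant* and OpSpecConstant* instructions, including
 * constant folding of OpSpecConstantOp via nir_eval_const_opcode().
 */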
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      bool bval = (opcode == SpvOpConstantTrue ||
                   opcode == SpvOpSpecConstantTrue);

      nir_const_value u32val = nir_const_value_for_uint(bval, 32);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);

      val->constant->values[0].b = u32val.u32 != 0;
      break;
   }

   case SpvOpConstant:
   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
      switch (bit_size) {
      case 64:
         val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
         break;
      case 32:
         val->constant->values[0].u32 = w[3];
         break;
      case 16:
         val->constant->values[0].u16 = w[3];
         break;
      case 8:
         val->constant->values[0].u8 = w[3];
         break;
      default:
         vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
      }

      if (opcode == SpvOpSpecConstant)
         vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
                                &val->constant->values[0]);
      break;
   }

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
         } else {
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type);
         }
      }

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];
         break;
      }

      case vtn_base_type_matrix:
      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val->constant);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         };

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;

            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}
static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement could be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */

   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with a matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with a matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
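/* Worked example (illustrative): an OpAtomicIAdd carrying
 * AcquireRelease | WorkgroupMemory semantics is split by the function above
 * into
 *
 *    *before = Release | WorkgroupMemory   (writes can't sink past the op)
 *    *after  = Acquire | WorkgroupMemory   (reads can't hoist above the op)
 *
 * so the atomic itself can then be emitted as a plain NIR intrinsic between
 * the two barriers.
 */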
static nir_memory_semantics
vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
                                       SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   return nir_semantics;
}
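/* Quick reference for the mapping above: Acquire -> NIR_MEMORY_ACQUIRE,
 * Release -> NIR_MEMORY_RELEASE, and both AcquireRelease and
 * SequentiallyConsistent -> NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE, plus
 * the MakeAvailable/MakeVisible bits when the Vulkan memory model is in use.
 */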
static nir_variable_mode
vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
                                  SpvMemorySemanticsMask semantics)
{
   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   return modes;
}
static nir_scope
vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
{
   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   return nir_scope;
}
static void
vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
                                SpvScope mem_scope,
                                SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);

   /* Memory semantics is optional for OpControlBarrier. */
   nir_scope nir_mem_scope;
   if (nir_semantics == 0 || modes == 0)
      nir_mem_scope = NIR_SCOPE_NONE;
   else
      nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);

   nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics,
                      modes);
}
static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics,
                      modes);
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   /* Always use bare types for SSA values for a couple of reasons:
    *
    *  1. Code which emits deref chains should never listen to the explicit
    *     layout information on the SSA value if any exists.  If we've
    *     accidentally been relying on this, we want to find those bugs.
    *
    *  2. We want to be able to quickly check that an SSA value being assigned
    *     to a SPIR-V value has the right type.  Using bare types everywhere
    *     ensures that we can pointer-compare.
    */
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
   src.src_type = type;
   return src;
}
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
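/* Worked example (illustrative): with an operands word of Bias | MinLod
 * (0x81) and op == MinLod (0x80), the only argument-taking bit below MinLod
 * is Bias, so idx = util_bitcount(0x81 & 0x7f & ops_with_arg) + 1 = 2 and,
 * after the final "idx += mask_idx", the MinLod argument is found two words
 * past the operands mask.
 */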
static void
non_uniform_decoration_cb(struct vtn_builder *b,
                          struct vtn_value *val, int member,
                          const struct vtn_decoration *dec, void *void_ctx)
{
   enum gl_access_qualifier *access = void_ctx;
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      *access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);

      /* It seems valid to use OpSampledImage with OpUndef instead of
       * OpTypeImage or OpTypeSampler.
       */
      if (vtn_untyped_value(b, w[3])->value_type == vtn_value_type_undef) {
         val->sampled_image->image = NULL;
      } else {
         val->sampled_image->image =
            vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      }

      if (vtn_untyped_value(b, w[4])->value_type == vtn_value_type_undef) {
         val->sampled_image->sampler = NULL;
      } else {
         val->sampled_image->sampler =
            vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
      }
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         vtn_push_pointer(b, w[2], src_val->sampled_image->image);
      } else {
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         vtn_push_pointer(b, w[2], src_val->pointer);
      }
      return;
   }

   struct vtn_type *ret_type = vtn_get_type(b, w[1]);

   struct vtn_pointer *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      image = sampled_val->sampled_image->image;
      sampler = sampled_val->sampled_image->sampler;
   } else {
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      image = sampled_val->pointer;
   }

   if (!image) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
      return;
   }
   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
   nir_deref_instr *sampler_deref =
      sampler ? vtn_pointer_to_deref(b, sampler) : NULL;

   const struct glsl_type *image_type = sampled_val->type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image_deref->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't use a sampler */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;
   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_get_nir_ssa(b, w[idx++]);
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);

   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image_type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
      src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
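/* Design note: NIR has no dedicated increment/decrement/subtract atomics, so
 * the helper above lowers OpAtomicIIncrement, OpAtomicIDecrement and
 * OpAtomicISub onto the add-based intrinsics by materializing +1, -1, or the
 * negated operand as the data source.
 */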
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, coord->num_components - 1);

   return nir_swizzle(&b->nb, coord, swizzle, 4);
}
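/* e.g. a 2-component coordinate (x, y) becomes (x, y, y, y): the MIN2 clamp
 * repeats the last valid component to pad out the vec4 the intrinsics expect.
 */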
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
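/* e.g. expand_to_vec4() turns a vec2 (x, y) into (x, y, x, x); the padding
 * lanes just repeat component 0 and are ignored by consumers that only read
 * the real components.
 */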
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_get_nir_ssa(b, w[5]);
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   struct vtn_value *res_val;
   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   case SpvOpImageQuerySize:
      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;
   case SpvOpImageRead: {
      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }
   case SpvOpImageWrite: {
      res_val = vtn_value(b, w[1], vtn_value_type_pointer);
      image.image = res_val->pointer;
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
   nir_intrinsic_set_access(intrin, access);
   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
         /* Only OpImageRead can support a lod parameter if
          * SPV_AMD_shader_image_load_store_lod is used but the current NIR
          * intrinsics definition for atomics requires us to set it for
          * OpAtomicLoad.
          */
         intrin->src[3] = nir_src_for_ssa(image.lod);
      }
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      nir_ssa_def *value = vtn_get_nir_ssa(b, value_id);
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
      /* Only OpImageWrite can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * OpAtomicStore.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      unsigned dest_components = glsl_get_vector_elements(type->type);
      if (nir_intrinsic_infos[op].dest_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        nir_intrinsic_dest_components(intrin), 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (nir_intrinsic_dest_components(intrin) != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      vtn_push_nir_ssa(b, w[2], result);
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:      return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and
       * AtomicSMax.  Right now there are no NIR intrinsics for them.  At
       * this moment Atomic Counter support is only needed for ARB_spirv
       * support, so we only need to support GLSL Atomic Counters that are
       * uints and don't allow direct storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}
static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:      return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}
/*
 * Handles shared atomics, ssbo atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_uniform) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset. In this case we don't need
       * to, as that info is already stored on the ptr->var->var nir_variable
       * (see vtn_create_variable).
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here,
          * as atomic counter uniforms don't have sources.
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }
   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
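/* Example (illustrative): shuffling a vec2 'a' with a vec3 'b' using indices
 * {1, 3, 0xffffffff} yields (a.y, b.y, undef): indices below
 * src0->num_components select from src0, larger ones are rebased into src1,
 * and 0xffffffff (SPIR-V's "undefined component" marker) becomes an SSA
 * undef.
 */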
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
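/* Example (illustrative): OpCompositeConstruct of a vec4 from a vec2 and two
 * floats arrives here as srcs = {vec2, float, float}; dest_idx walks 0..3,
 * pointing channels 0-1 at the vec2 and channels 2-3 at the two scalars.
 */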
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      /* If we got a vector here, that means the next index will be trying to
       * dereference a scalar.
       */
      vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
                  "OpCompositeInsert has too many indices.");
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");

      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity. In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
   } else {
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");

         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the component to extract from the vector.
          */
         const struct glsl_type *scalar_type =
            glsl_scalar_type(glsl_get_base_type(cur->type));
         struct vtn_ssa_value *ret = vtn_create_ssa_value(b, scalar_type);
         ret->def = nir_channel(&b->nb, cur->def, indices[i]);
         return ret;
      } else {
         vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
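/* Example (illustrative): extracting m[2].y from a struct { mat4 m; } uses
 * indices {0, 2, 1}: the first two walk elems[] down to the column vector,
 * and the final index hits the vector-or-scalar branch above, which selects
 * the component with nir_channel().
 */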
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_get_type(b, w[1]);
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]));
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                   vtn_get_nir_ssa(b, w[4]),
                                   vtn_get_nir_ssa(b, w[5]));
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]),
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   case SpvOpCopyObject:
      vtn_copy_value(b, w[3], w[2]);
      return;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa_value(b, w[2], ssa);
}
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->shader->options->use_scoped_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask |
      SpvMemorySemanticsOutputMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   /* Map the GLSL memoryBarrier() construct and any barriers with more than
    * one semantic to the corresponding NIR one.
    */
   if (util_bitcount(semantics & all_memory_semantics) > 1) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      if (semantics & SpvMemorySemanticsOutputMemoryMask) {
         /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't
          * include TCS outputs, so we have to emit its own intrinsic for
          * that.  We then need to emit another memory_barrier to prevent
          * moving non-output operations to before the tcs_patch barrier.
          */
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      }
      return;
   }

   /* Issue a more specific barrier */
   switch (semantics & all_memory_semantics) {
   case SpvMemorySemanticsUniformMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
      break;
   case SpvMemorySemanticsWorkgroupMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
      break;
   case SpvMemorySemanticsAtomicCounterMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
      break;
   case SpvMemorySemanticsImageMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
      break;
   case SpvMemorySemanticsOutputMemoryMask:
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
      break;
   default:
      break;
   }
}
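/* For example, GLSL memoryBarrierShared() typically reaches us as an
 * OpMemoryBarrier whose only storage-class bit is WorkgroupMemory, which the
 * switch above turns into a single nir_intrinsic_memory_barrier_shared.
 */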
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }

      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
       * memory semantics of None for GLSL barrier().
       * And before that, prior to c3f1cdfa, emitted the OpControlBarrier with
       * Device instead of Workgroup for execution scope.
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          (execution_scope == SpvScopeWorkgroup ||
           execution_scope == SpvScopeDevice) &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         execution_scope = SpvScopeWorkgroup;
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class: Writes to Output
       *    variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      if (b->shader->options->use_scoped_barrier) {
         vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
                                         memory_semantics);
      } else {
         vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

         if (execution_scope == SpvScopeWorkgroup)
            vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      }
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}

static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}

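/* Note that spv_check_supported() only warns when a capability is missing
 * from b->options->caps rather than failing outright, so translation can
 * still proceed for modules that declare a capability they never actually
 * use.
 */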
#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)

static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Let this be a name label regardless */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}

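/* Handles one instruction of the module preamble (debug, capability, memory
 * model, entry point, and decoration instructions).  Returning false tells
 * vtn_foreach_instruction() that the preamble has ended and the walk should
 * stop at this instruction.
 */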
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;

      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;

      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;

      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      case SpvCapabilityImageGatherBiasLodAMD:
         spv_check_supported(amd_image_gather_bias_lod, cap);
         break;

      case SpvCapabilityAtomicFloat32AddEXT:
         spv_check_supported(float32_atomic_add, cap);
         break;

      case SpvCapabilityAtomicFloat64AddEXT:
         spv_check_supported(float64_atomic_add, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_32bit_global;
         b->options->global_addr_format = nir_address_format_32bit_global;
         b->options->temp_addr_format = nir_address_format_32bit_global;
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         b->options->shared_addr_format = nir_address_format_64bit_global;
         b->options->global_addr_format = nir_address_format_64bit_global;
         b->options->temp_addr_format = nir_address_format_64bit_global;
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      b->mem_model = w[2];
      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;

   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      } else {
         return false; /* End of preamble. */
      }
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}

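/* Callback for vtn_foreach_execution_mode(): applies one execution mode of
 * the selected entry point to the corresponding shader_info fields.
 */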
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch(mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;

   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHint:
   case SpvExecutionModeLocalSizeHintId:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;

   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ:
      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
      break;
   }
}

static void
vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                                           const struct vtn_decoration *mode, void *data)
{
   vtn_assert(b->entry_point == entry_point);

   unsigned execution_mode = 0;

   switch(mode->exec_mode) {
   case SpvExecutionModeDenormPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeDenormFlushToZero:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeSignedZeroInfNanPreserve:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTE:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;
   case SpvExecutionModeRoundingModeRTZ:
      switch (mode->operands[0]) {
      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
      default: vtn_fail("Floating point type not supported");
      }
      break;

   default:
      break;
   }

   b->shader->info.float_controls_execution_mode |= execution_mode;
}

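/* Handles the types/variables/constants section that follows the preamble.
 * As with the preamble pass, returning false stops the instruction walk at
 * the first opcode that belongs to the next section (function bodies).
 */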
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}

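/* nir_bcsel() only works on vectors and scalars, so OpSelect on a composite
 * is implemented by recursing into the elements and selecting each
 * vector/scalar leaf with the same condition.
 */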
static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}

static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   vtn_push_ssa_value(b, w[2],
                      vtn_nir_select(b, vtn_ssa_value(b, w[3]),
                                        vtn_ssa_value(b, w[4]),
                                        vtn_ssa_value(b, w[5])));
}

static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
   struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_get_nir_ssa(b, w[3]),
                                vtn_get_nir_ssa(b, w[4]),
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_get_nir_ssa(b, w[3]),
                               vtn_get_nir_ssa(b, w[4]),
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   vtn_push_nir_ssa(b, w[2], def);
}

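/* Per-instruction callback for emitting function bodies.  Most opcodes are
 * dispatched in groups to the vtn_handle_*() helpers above; only a handful
 * of simple cases are emitted directly here.
 */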
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_pointer *image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      if (glsl_type_is_image(image->type->type)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image->type->type));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;

   case SpvOpPtrDiff:
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
      break;
   }

   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_get_type(b, w[1]);
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      vtn_push_nir_ssa(b, w[2], result);
      break;
   }

   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}

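/* Layout of the five-word SPIR-V module header validated below:
 *
 *    words[0]: magic number (SpvMagicNumber)
 *    words[1]: SPIR-V version; 0x10000 is version 1.0
 *    words[2]: generator id (high 16 bits) and generator version (low 16 bits)
 *    words[3]: value ID bound; every result ID in the module is < words[3]
 *    words[4]: reserved schema word, must be 0
 */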
static struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   b->file = NULL;
   b->line = -1;
   b->col = -1;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }

   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* The first GLSLang version bump actually came 1.5 years after #179 was
    * fixed, but this should at least let us shut the workaround off for
    * modern versions of GLSLang.
    */
   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;

 fail:
   ralloc_free(b);
   return NULL;
}

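/* OpenCL kernel entry points may take parameters, but a NIR entry point may
 * not.  This wrapper exposes each kernel parameter as a read-only shader
 * input, copies by-value pointer parameters (Function storage class) into
 * local variables, and then calls the real entry point with those values.
 */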
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_shader_in;
      in_var->data.read_only = true;
      in_var->data.location = i;
      if (is_by_val)
         in_var->type = param_type->deref->type;
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}

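/* Top-level translator entry point: walks the preamble, the
 * type/variable/constant section, and the execution modes, then emits every
 * referenced function body and returns the finished nir_shader.  On failure
 * it returns NULL, reached via the setjmp() on b->fail_jump.
 */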
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse rounding mode execution modes. This has to happen earlier than
    * other changes in the execution modes since they can affect, for example,
    * the result of the floating point constants.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_rounding_mode_in_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;