/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
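/* Writes the raw SPIR-V binary to <path>/<prefix>-<N>.spirv.  For example,
 * running with MESA_SPIRV_FAIL_DUMP_PATH=/tmp/spv would (assuming the
 * directory exists and is writable) leave /tmp/spv/fail-0.spirv behind
 * after the first translation failure.
 */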
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   _mesa_hash_table_insert(b->const_table, constant, val);

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer: {
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;
   }

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}
nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}
struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}
static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}
static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
}
static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}
static nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}
static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
}
static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);

   return si;
}
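/* SPIR-V packs string literals as nul-terminated UTF-8 bytes laid out in
 * little-endian order inside consecutive 32-bit words, so the word count
 * computed below is just the byte length (including the nul) rounded up
 * to a whole number of words.
 */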
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
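/* Every SPIR-V instruction starts with a word whose low 16 bits hold the
 * opcode and whose high 16 bits hold the total word count.  For example,
 * the word 0x0004003e encodes an OpStore (opcode 62) that is four words
 * long.  The loop below walks the binary by that count.
 */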
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   return NULL;
}
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
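/* Decorations are recorded with a small scope encoding: plain decorations
 * use VTN_DEC_DECORATION, execution modes use VTN_DEC_EXECUTION_MODE, and
 * member decorations store VTN_DEC_STRUCT_MEMBER0 + the member index, so a
 * single linked list per value can hold all three kinds.
 */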
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;
   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
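/* Rebuilds array_type with its innermost element replaced by type, e.g.
 * wrapping the atomic_uint type in a uint[3] array type yields atomic_uint[3],
 * preserving each level's length and explicit stride.
 */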
static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}
static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations kept because we need offsets for XFB arrays of
       * blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators to perform type deduplication.  Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}
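/* vtn_type objects are shared between SPIR-V ids, so before a decoration
 * may mutate per-member matrix layout the affected member (and any array
 * levels wrapping the matrix) is copied first; this is effectively
 * copy-on-write for types.
 */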
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
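/* Per the SPIR-V spec, MatrixStride gives the byte stride between columns
 * for a column-major matrix and between rows for a row-major one, which is
 * why the row-major path below swaps the old stride down onto the element
 * type before applying the decorated value.
 */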
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable; just check that if it is not
       * a struct member, it should be a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
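/* As a concrete example of what this builds: OpTypeVector %float 4 becomes
 * a vec4 whose array_element is the float scalar type, length 4, and a
 * 4-byte per-component stride (booleans are stored as 32-bit values, hence
 * their fixed stride of 4).
 */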
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
                          ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                          glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
                     "Sampled type of OpTypeImage must be void for kernels");
      } else {
         vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                     glsl_get_bit_size(sampled_type->type) != 32,
                     "Sampled type of OpTypeImage must be a 32-bit scalar");
      }

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else if (b->shader->info.stage == MESA_SHADER_KERNEL)
         /* Per the CL C spec: If no qualifier is provided, read_only is assumed. */
         val->type->access_qualifier = SpvAccessQualifierReadOnly;
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 GLSL_TYPE_VOID);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented in NIR as a vec2 SSA value where each
       * component is the result of a deref instruction.  The first component
       * is the image and the second is the sampler.  An OpLoad on an
       * OpTypeSampledImage pointer from UniformConstant memory just takes
       * the NIR deref from the pointer and duplicates it to both vector
       * components.
       */
      nir_address_format addr_format =
         vtn_mode_to_address_format(b, vtn_variable_mode_function);
      assert(nir_address_format_num_components(addr_format) == 1);
      unsigned bit_size = nir_address_format_bit_size(addr_format);
      assert(bit_size == 32 || bit_size == 64);

      enum glsl_base_type base_type =
         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
      val->type->type = glsl_vector_type(base_type, 2);
      break;
   }

   case SpvOpTypeSampler:
      val->type->base_type = vtn_base_type_sampler;

      /* Samplers are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeSampler pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
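/* Specializations come from the driver (e.g. VkSpecializationInfo in
 * Vulkan): each entry pairs a SpecId with a replacement value, and the
 * callback below overwrites the default constant when the ids match.
 */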
static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         return;
      }
   }
}
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}
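/* Note that OpSpecConstant* instructions carry a default literal in the
 * module itself; the spec_constant_decoration_cb pass above only replaces
 * that default when the driver actually supplied a specialization for the
 * matching SpecId.
 */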
1788 vtn_handle_constant(struct vtn_builder
*b
, SpvOp opcode
,
1789 const uint32_t *w
, unsigned count
)
1791 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_constant
);
1792 val
->constant
= rzalloc(b
, nir_constant
);
1794 case SpvOpConstantTrue
:
1795 case SpvOpConstantFalse
:
1796 case SpvOpSpecConstantTrue
:
1797 case SpvOpSpecConstantFalse
: {
1798 vtn_fail_if(val
->type
->type
!= glsl_bool_type(),
1799 "Result type of %s must be OpTypeBool",
1800 spirv_op_to_string(opcode
));
1802 bool bval
= (opcode
== SpvOpConstantTrue
||
1803 opcode
== SpvOpSpecConstantTrue
);
1805 nir_const_value u32val
= nir_const_value_for_uint(bval
, 32);
1807 if (opcode
== SpvOpSpecConstantTrue
||
1808 opcode
== SpvOpSpecConstantFalse
)
1809 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
, &u32val
);
1811 val
->constant
->values
[0].b
= u32val
.u32
!= 0;
1816 case SpvOpSpecConstant
: {
1817 vtn_fail_if(val
->type
->base_type
!= vtn_base_type_scalar
,
1818 "Result type of %s must be a scalar",
1819 spirv_op_to_string(opcode
));
1820 int bit_size
= glsl_get_bit_size(val
->type
->type
);
1823 val
->constant
->values
[0].u64
= vtn_u64_literal(&w
[3]);
1826 val
->constant
->values
[0].u32
= w
[3];
1829 val
->constant
->values
[0].u16
= w
[3];
1832 val
->constant
->values
[0].u8
= w
[3];
1835 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size
);
1838 if (opcode
== SpvOpSpecConstant
)
1839 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
,
1840 &val
->constant
->values
[0]);
1844 case SpvOpSpecConstantComposite
:
1845 case SpvOpConstantComposite
: {
1846 unsigned elem_count
= count
- 3;
1847 vtn_fail_if(elem_count
!= val
->type
->length
,
1848 "%s has %u constituents, expected %u",
1849 spirv_op_to_string(opcode
), elem_count
, val
->type
->length
);
1851 nir_constant
**elems
= ralloc_array(b
, nir_constant
*, elem_count
);
1852 for (unsigned i
= 0; i
< elem_count
; i
++) {
1853 struct vtn_value
*val
= vtn_untyped_value(b
, w
[i
+ 3]);
1855 if (val
->value_type
== vtn_value_type_constant
) {
1856 elems
[i
] = val
->constant
;
1858 vtn_fail_if(val
->value_type
!= vtn_value_type_undef
,
1859 "only constants or undefs allowed for "
1860 "SpvOpConstantComposite");
1861 /* to make it easier, just insert a NULL constant for now */
1862 elems
[i
] = vtn_null_constant(b
, val
->type
);
1866 switch (val
->type
->base_type
) {
1867 case vtn_base_type_vector
: {
1868 assert(glsl_type_is_vector(val
->type
->type
));
1869 for (unsigned i
= 0; i
< elem_count
; i
++)
1870 val
->constant
->values
[i
] = elems
[i
]->values
[0];
1874 case vtn_base_type_matrix
:
1875 case vtn_base_type_struct
:
1876 case vtn_base_type_array
:
1877 ralloc_steal(val
->constant
, elems
);
1878 val
->constant
->num_elements
= elem_count
;
1879 val
->constant
->elements
= elems
;
1883 vtn_fail("Result type of %s must be a composite type",
1884 spirv_op_to_string(opcode
));
   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }
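      /* Worked example (illustrative values): with len0 == 2 and len1 == 2,
       * "combined" is (v0.x, v0.y, v1.x, v1.y).  Component literals (3, 0)
       * then select (v1.y, v0.x), and a literal of 0xffffffff stores the
       * 0xdeadbeefdeadbeef sentinel so a stray read of an unused lane is
       * easy to spot in the output.
       */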
      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (void *)val->constant);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }
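      /* Index-chain walk, illustrated: for a hypothetical type
       * "struct { vec4 v; } s[2]", the literals (1, 0, 2) step through array
       * element 1, struct member 0, and finally vector component 2.  Only a
       * trailing vector index sets "elem"; everything before it just
       * re-points "c" at a nested nir_constant.
       */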
      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;

            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }
   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}
static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement could be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */

   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;
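   /* Worked example (hypothetical instruction): an OpAtomicLoad carrying
    * Acquire | UniformMemory splits into no "before" barrier and an "after"
    * barrier of Acquire | UniformMemory, so later buffer accesses cannot be
    * reordered above the load.  A Release on an OpAtomicStore lands in
    * "before" instead, protecting the writes that precede it.
    */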
   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
static nir_memory_semantics
vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
                                       SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   return nir_semantics;
}
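/* Mapping sketch (illustrative): Acquire -> NIR_MEMORY_ACQUIRE,
 * Release -> NIR_MEMORY_RELEASE, and both AcquireRelease and
 * SequentiallyConsistent -> ACQUIRE | RELEASE, since nothing stronger than
 * acquire-release is distinguished at this level.  MakeAvailable and
 * MakeVisible only add their bits when VulkanMemoryModel is declared.
 */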
static nir_variable_mode
vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
                                  SpvMemorySemanticsMask semantics)
{
   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   return modes;
}
static nir_scope
vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
{
   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   return nir_scope;
}
static void
vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
                                SpvScope mem_scope,
                                SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);

   /* Memory semantics is optional for OpControlBarrier. */
   nir_scope nir_mem_scope;
   if (nir_semantics == 0 || modes == 0)
      nir_mem_scope = NIR_SCOPE_NONE;
   else
      nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);

   nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
}
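/* Usage sketch (hypothetical SPIR-V): "OpControlBarrier %workgroup
 * %workgroup %acqrel_wg" reaches here with exec_scope == mem_scope ==
 * Workgroup and AcquireRelease | WorkgroupMemory semantics, producing one
 * nir_scoped_barrier with NIR_SCOPE_WORKGROUP for both scopes,
 * ACQUIRE | RELEASE semantics, and nir_var_mem_shared modes.
 */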
static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
}
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   /* Always use bare types for SSA values for a couple of reasons:
    *
    *  1. Code which emits deref chains should never listen to the explicit
    *     layout information on the SSA value if any exists.  If we've
    *     accidentally been relying on this, we want to find those bugs.
    *
    *  2. We want to be able to quickly check that an SSA value being assigned
    *     to a SPIR-V value has the right type.  Using bare types everywhere
    *     ensures that we can pointer-compare.
    */
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
   src.src_type = type;
   return src;
}
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}
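/* Index math, worked through (illustrative mask): if w[mask_idx] has
 * Grad | ConstOffset set and we ask for ConstOffset, one lower argument-
 * bearing bit (Grad) gives idx = 2, the two-argument adjustment for Grad
 * bumps it to 3, and adding mask_idx yields the word holding the ConstOffset
 * <id> -- Grad's ddx/ddy occupy the two words in between.
 */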
static void
non_uniform_decoration_cb(struct vtn_builder *b,
                          struct vtn_value *val, int member,
                          const struct vtn_decoration *dec, void *void_ctx)
{
   enum gl_access_qualifier *access = void_ctx;
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      *access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_type *ret_type = vtn_get_type(b, w[1]);

   if (opcode == SpvOpSampledImage) {
      struct vtn_sampled_image si = {
         .image = vtn_get_image(b, w[3]),
         .sampler = vtn_get_sampler(b, w[4]),
      };
      vtn_push_sampled_image(b, w[2], si);
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
      vtn_push_image(b, w[2], si.image);
      return;
   }

   nir_deref_instr *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
      image = si.image;
      sampler = si.sampler;
   } else {
      image = vtn_get_image(b, w[3]);
   }

   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
   const bool is_array = glsl_sampler_type_is_array(image->type);
   nir_alu_type dest_type = nir_type_invalid;
   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }
   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }
   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_get_nir_ssa(b, w[idx++]);
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }
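   /* Coordinate-count example (hypothetical image type): a sampled 2D array
    * image has 2 coordinate components from the dimension plus 1 for the
    * layer, so coord_components == 3; nir_texop_lod is the exception since
    * OpImageQueryLod takes no layer.  The Proj variants then read one extra
    * channel past coord_components as the projector.
    */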
   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }
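   /* Note: the optional-operand blocks above never scan w[] linearly; each
    * one asks image_operand_arg() to locate its argument from the mask, so
    * the order of these "if" blocks is independent of the order in which
    * the arguments appear in the instruction (which SPIR-V fixes by mask
    * bit order).
    */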
   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);

   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;
   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image->type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));
   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
}
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
      src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}
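/* Two quirks worth calling out: IIncrement/IDecrement have no value operand
 * in SPIR-V, so they become atomic adds of the constants +1/-1, and ISub is
 * folded into an add of the negated source.  For compare-exchange, SPIR-V
 * orders the words as (..., Value, Comparator) while the NIR intrinsics take
 * the comparator first, hence the w[8]/w[7] swap above.
 */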
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, coord->num_components - 1);

   return nir_swizzle(&b->nb, coord, swizzle, 4);
}
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}
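/* Padding example (illustrative): for a 2-component input (x, y),
 * get_image_coord() clamps the swizzle and yields (x, y, y, y), while
 * expand_to_vec4() wraps to component 0 and yields (x, y, x, x).  Either
 * way only the first num_components lanes are meaningful; the consumers
 * never read the padding.
 */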
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_nir_deref(b, w[3]);
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_get_nir_ssa(b, w[5]);
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }
   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   enum gl_access_qualifier access = 0;

   struct vtn_value *res_val;
   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      access |= ACCESS_COHERENT;
      break;

   case SpvOpAtomicStore:
      res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      access |= ACCESS_COHERENT;
      break;

   case SpvOpImageQuerySize:
      res_val = vtn_untyped_value(b, w[3]);
      image.image = vtn_get_image(b, w[3]);
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;
   case SpvOpImageRead: {
      res_val = vtn_untyped_value(b, w[3]);
      image.image = vtn_get_image(b, w[3]);
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */
      break;
   }

   case SpvOpImageWrite: {
      res_val = vtn_untyped_value(b, w[1]);
      image.image = vtn_get_image(b, w[1]);
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);

   if (opcode == SpvOpImageQuerySize) {
      /* ImageQuerySize only has an LOD which is currently always 0 */
      intrin->src[1] = nir_src_for_ssa(nir_imm_int(&b->nb, 0));
   } else {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
   nir_intrinsic_set_access(intrin, access);
   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpImageQuerySize:
   case SpvOpImageRead:
      if (opcode == SpvOpImageRead || opcode == SpvOpAtomicLoad) {
         /* Only OpImageRead can support a lod parameter if
          * SPV_AMD_shader_image_load_store_lod is used but the current NIR
          * intrinsics definition for atomics requires us to set it for
          * OpAtomicLoad.
          */
         intrin->src[3] = nir_src_for_ssa(image.lod);
      }
      break;
   case SpvOpAtomicStore:
   case SpvOpImageWrite: {
      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
      struct vtn_ssa_value *value = vtn_ssa_value(b, value_id);
      /* nir_intrinsic_image_deref_store always takes a vec4 value */
      assert(op == nir_intrinsic_image_deref_store);
      intrin->num_components = 4;
      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value->def));
      /* Only OpImageWrite can support a lod parameter if
       * SPV_AMD_shader_image_load_store_lod is used but the current NIR
       * intrinsics definition for atomics requires us to set it for
       * OpAtomicStore.
       */
      intrin->src[4] = nir_src_for_ssa(image.lod);

      if (opcode == SpvOpImageWrite)
         nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
      break;
   }

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }
   /* Image operations implicitly have the Image storage memory semantics. */
   semantics |= SpvMemorySemanticsImageMemoryMask;

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      unsigned dest_components = glsl_get_vector_elements(type->type);
      if (nir_intrinsic_infos[op].dest_components == 0)
         intrin->num_components = dest_components;

      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                        nir_intrinsic_dest_components(intrin), 32, NULL);

      nir_builder_instr_insert(&b->nb, &intrin->instr);

      nir_ssa_def *result = &intrin->dest.ssa;
      if (nir_intrinsic_dest_components(intrin) != dest_components)
         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);

      vtn_push_nir_ssa(b, w[2], result);

      if (opcode == SpvOpImageRead)
         nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
   } else {
      nir_builder_instr_insert(&b->nb, &intrin->instr);
   }

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
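/* Barrier placement sketch: for a hypothetical coherent image atomic with
 * AcquireRelease semantics, the split yields a Release-flavored barrier
 * emitted before the intrinsic and an Acquire-flavored one after it, both
 * carrying the implicit ImageMemory storage class added above.
 */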
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_ssbo;
   case SpvOpAtomicStore:      return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
   }
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
   OP(AtomicLoad,                read_deref)
   OP(AtomicExchange,            exchange)
   OP(AtomicCompareExchange,     comp_swap)
   OP(AtomicCompareExchangeWeak, comp_swap)
   OP(AtomicIIncrement,          inc_deref)
   OP(AtomicIDecrement,          post_dec_deref)
   OP(AtomicIAdd,                add_deref)
   OP(AtomicISub,                add_deref)
   OP(AtomicUMin,                min_deref)
   OP(AtomicUMax,                max_deref)
   OP(AtomicAnd,                 and_deref)
   OP(AtomicOr,                  or_deref)
   OP(AtomicXor,                 xor_deref)
#undef OP
   default:
      /* We left the following out: AtomicStore, AtomicSMin and AtomicSMax.
       * Right now there are no NIR intrinsics for them.  At this moment
       * Atomic Counter support is only needed for ARB_spirv, so we only
       * need to support GLSL Atomic Counters, which are uints and don't
       * allow direct storage.
       */
      vtn_fail("Invalid uniform atomic");
   }
}
static nir_intrinsic_op
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   switch (opcode) {
   case SpvOpAtomicLoad:       return nir_intrinsic_load_deref;
   case SpvOpAtomicStore:      return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid shared atomic", opcode);
   }
}
/*
 * Handles shared atomics, SSBO atomics and atomic counters.
 */
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   struct vtn_pointer *ptr;
   nir_intrinsic_instr *atomic;

   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   switch (opcode) {
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      break;

   case SpvOpAtomicStore:
      ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
   /* uniform as "atomic counter uniform" */
   if (ptr->mode == vtn_variable_mode_atomic_counter) {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      /* SSBO needs to initialize index/offset.  In this case we don't need
       * to, as that info is already stored on the ptr->var->var nir_variable
       * (see vtn_create_variable).
       */

      switch (opcode) {
      case SpvOpAtomicLoad:
      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
         /* Nothing: we don't need to call fill_common_atomic_sources here,
          * as atomic counter uniforms don't have sources.
          */
         break;

      default:
         unreachable("Invalid SPIR-V atomic");
      }
   } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      nir_ssa_def *offset, *index;
      offset = vtn_pointer_to_offset(b, ptr, &index);

      assert(ptr->mode == vtn_variable_mode_ssbo);

      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);

      nir_intrinsic_set_access(atomic, ACCESS_COHERENT);

      int src = 0;
      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
         fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   } else {
      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
      const struct glsl_type *deref_type = deref->type;
      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);

      if (ptr->mode != vtn_variable_mode_workgroup)
         nir_intrinsic_set_access(atomic, ACCESS_COHERENT);

      switch (opcode) {
      case SpvOpAtomicLoad:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         break;

      case SpvOpAtomicStore:
         atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
         atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
         break;

      case SpvOpAtomicExchange:
      case SpvOpAtomicCompareExchange:
      case SpvOpAtomicCompareExchangeWeak:
      case SpvOpAtomicIIncrement:
      case SpvOpAtomicIDecrement:
      case SpvOpAtomicIAdd:
      case SpvOpAtomicISub:
      case SpvOpAtomicSMin:
      case SpvOpAtomicUMin:
      case SpvOpAtomicSMax:
      case SpvOpAtomicUMax:
      case SpvOpAtomicAnd:
      case SpvOpAtomicOr:
      case SpvOpAtomicXor:
      case SpvOpAtomicFAddEXT:
         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;

      default:
         vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
      }
   }
   /* Atomic ordering operations will implicitly apply to the atomic operation
    * storage class, so include that too.
    */
   semantics |= vtn_storage_class_to_memory_semantics(ptr->ptr_type->storage_class);

   SpvMemorySemanticsMask before_semantics;
   SpvMemorySemanticsMask after_semantics;
   vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);

   if (before_semantics)
      vtn_emit_memory_barrier(b, scope, before_semantics);

   if (opcode != SpvOpAtomicStore) {
      struct vtn_type *type = vtn_get_type(b, w[1]);

      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        glsl_get_vector_elements(type->type),
                        glsl_get_bit_size(type->type), NULL);

      vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
   }

   nir_builder_instr_insert(&b->nb, &atomic->instr);

   if (after_semantics)
      vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
   nir_op op = nir_op_vec(num_components);
   nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
                     bit_size, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
struct vtn_ssa_value *
vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
                                      glsl_get_bit_size(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
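/* Transpose sketch (illustrative 2-column, 3-row matrix): with source
 * columns (a,b,c) and (d,e,f), iteration i builds destination column i as
 * vec2 of the i'th component of each source column, i.e. (a,d), (b,e),
 * (c,f).  Caching the source in dest->transposed means transposing the
 * result again hands back the original value for free.
 */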
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src =
            nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, there must be at least two Constituent
    *    operands."
    */
   vtn_assert(num_srcs >= 2);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      vtn_assert(dest_idx + src->num_components <= num_components);
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
    *
    *    "When constructing a vector, the total number of components in all
    *    the operands must equal the number of components in Result Type."
    */
   vtn_assert(dest_idx == num_components);

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
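/* Construction example (hypothetical GLSL): "vec4 v = vec4(uv, 0.0, 1.0)"
 * arrives as OpCompositeConstruct with three constituents (a vec2 and two
 * floats); the loop above copies uv.x, uv.y, then each scalar into
 * consecutive destination lanes, and the final assert checks 2+1+1 == 4.
 */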
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      /* If we got a vector here, that means the next index will be trying to
       * dereference a scalar.
       */
      vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
                  "OpCompositeInsert has too many indices.");
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");

      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index to insert the scalar into the vector.
       */
      cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
   } else {
      vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                  "All indices in an OpCompositeInsert must be in-bounds");
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
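/* Because SSA values are immutable, the insert copies the whole composite
 * tree first and then patches one leaf: e.g. (hypothetical) indices (1, 2)
 * into an array of vec4 rewrite element 1's component 2 via
 * nir_vector_insert_imm, while the untouched elements still share the
 * original nir_ssa_def pointers from vtn_composite_copy().
 */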
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         vtn_assert(i == num_indices - 1);
         vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");

         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the vector to extract.
          */
         const struct glsl_type *scalar_type =
            glsl_scalar_type(glsl_get_base_type(cur->type));
         struct vtn_ssa_value *ret = vtn_create_ssa_value(b, scalar_type);
         ret->def = nir_channel(&b->nb, cur->def, indices[i]);
         return ret;
      } else {
         vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
                     "All indices in an OpCompositeExtract must be in-bounds");
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_type *type = vtn_get_type(b, w[1]);
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]));
      break;

   case SpvOpVectorInsertDynamic:
      ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
                                   vtn_get_nir_ssa(b, w[4]),
                                   vtn_get_nir_ssa(b, w[5]));
      break;

   case SpvOpVectorShuffle:
      ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
                                    vtn_get_nir_ssa(b, w[3]),
                                    vtn_get_nir_ssa(b, w[4]),
                                    w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;

      if (glsl_type_is_vector_or_scalar(type->type)) {
         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
         ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type->type),
                                 elems, srcs);
      } else {
         ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                  w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                 vtn_ssa_value(b, w[3]),
                                 w + 5, count - 5);
      break;

   case SpvOpCopyLogical:
      ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;
   case SpvOpCopyObject:
      vtn_copy_value(b, w[3], w[2]);
      return;

   default:
      vtn_fail_with_opcode("unknown composite operation", opcode);
   }

   vtn_push_ssa_value(b, w[2], ssa);
}
static void
vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
{
   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
   nir_builder_instr_insert(&b->nb, &intrin->instr);
}
void
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                        SpvMemorySemanticsMask semantics)
{
   if (b->shader->options->use_scoped_barrier) {
      vtn_emit_scoped_memory_barrier(b, scope, semantics);
      return;
   }

   static const SpvMemorySemanticsMask all_memory_semantics =
      SpvMemorySemanticsUniformMemoryMask |
      SpvMemorySemanticsWorkgroupMemoryMask |
      SpvMemorySemanticsAtomicCounterMemoryMask |
      SpvMemorySemanticsImageMemoryMask |
      SpvMemorySemanticsOutputMemoryMask;

   /* If we're not actually doing a memory barrier, bail */
   if (!(semantics & all_memory_semantics))
      return;

   /* GL and Vulkan don't have these */
   vtn_assert(scope != SpvScopeCrossDevice);

   if (scope == SpvScopeSubgroup)
      return; /* Nothing to do here */

   if (scope == SpvScopeWorkgroup) {
      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
      return;
   }

   /* There are only two scopes left */
   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);

   /* Map the GLSL memoryBarrier() construct and any barriers with more
    * than one semantic to the corresponding NIR one.
    */
   if (util_bitcount(semantics & all_memory_semantics) > 1) {
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      if (semantics & SpvMemorySemanticsOutputMemoryMask) {
         /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't
          * include TCS outputs, so we have to emit its own intrinsic for
          * that.  We then need to emit another memory_barrier to prevent
          * moving non-output operations to before the tcs_patch barrier.
          */
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
      }
      return;
   }

   /* Issue a more specific barrier */
   switch (semantics & all_memory_semantics) {
   case SpvMemorySemanticsUniformMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
      break;
   case SpvMemorySemanticsWorkgroupMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
      break;
   case SpvMemorySemanticsAtomicCounterMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
      break;
   case SpvMemorySemanticsImageMemoryMask:
      vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
      break;
   case SpvMemorySemanticsOutputMemoryMask:
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
      break;
   default:
      break;
   }
}
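
/* Top-level handler for the barrier-like opcodes: vertex-stream
 * emission (which NIR also models as intrinsics), OpMemoryBarrier, and
 * OpControlBarrier, including the GLSLang barrier() workaround.
 */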
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, UNUSED unsigned count)
{
   switch (opcode) {
   case SpvOpEmitVertex:
   case SpvOpEmitStreamVertex:
   case SpvOpEndPrimitive:
   case SpvOpEndStreamPrimitive: {
      nir_intrinsic_op intrinsic_op;
      switch (opcode) {
      case SpvOpEmitVertex:
      case SpvOpEmitStreamVertex:
         intrinsic_op = nir_intrinsic_emit_vertex;
         break;
      case SpvOpEndPrimitive:
      case SpvOpEndStreamPrimitive:
         intrinsic_op = nir_intrinsic_end_primitive;
         break;
      default:
         unreachable("Invalid opcode");
      }

      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, intrinsic_op);

      switch (opcode) {
      case SpvOpEmitStreamVertex:
      case SpvOpEndStreamPrimitive: {
         unsigned stream = vtn_constant_uint(b, w[1]);
         nir_intrinsic_set_stream_id(intrin, stream);
         break;
      }
      default:
         break;
      }

      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpMemoryBarrier: {
      SpvScope scope = vtn_constant_uint(b, w[1]);
      SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
      vtn_emit_memory_barrier(b, scope, semantics);
      return;
   }

   case SpvOpControlBarrier: {
      SpvScope execution_scope = vtn_constant_uint(b, w[1]);
      SpvScope memory_scope = vtn_constant_uint(b, w[2]);
      SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);

      /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier
       * with memory semantics of None for GLSL barrier().
       * And before that, prior to c3f1cdfa, emitted the OpControlBarrier
       * with Device instead of Workgroup for execution scope.
       */
      if (b->wa_glslang_cs_barrier &&
          b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
          (execution_scope == SpvScopeWorkgroup ||
           execution_scope == SpvScopeDevice) &&
          memory_semantics == SpvMemorySemanticsMaskNone) {
         execution_scope = SpvScopeWorkgroup;
         memory_scope = SpvScopeWorkgroup;
         memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
                            SpvMemorySemanticsWorkgroupMemoryMask;
      }

      /* From the SPIR-V spec:
       *
       *    "When used with the TessellationControl execution model, it also
       *    implicitly synchronizes the Output Storage Class: Writes to Output
       *    variables performed by any invocation executed prior to a
       *    OpControlBarrier will be visible to any other invocation after
       *    return from that OpControlBarrier."
       */
      if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
         memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
                               SpvMemorySemanticsReleaseMask |
                               SpvMemorySemanticsAcquireReleaseMask |
                               SpvMemorySemanticsSequentiallyConsistentMask);
         memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
                             SpvMemorySemanticsOutputMemoryMask;
      }

      if (b->shader->options->use_scoped_barrier) {
         vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
                                         memory_semantics);
      } else {
         vtn_emit_memory_barrier(b, memory_scope, memory_semantics);

         if (execution_scope == SpvScopeWorkgroup)
            vtn_emit_barrier(b, nir_intrinsic_control_barrier);
      }
      break;
   }

   default:
      unreachable("unknown barrier instruction");
   }
}
static unsigned
gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
                                     SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
   case SpvExecutionModeOutputPoints:
      return 0; /* GL_POINTS */
   case SpvExecutionModeInputLines:
      return 1; /* GL_LINES */
   case SpvExecutionModeInputLinesAdjacency:
      return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
   case SpvExecutionModeTriangles:
      return 4; /* GL_TRIANGLES */
   case SpvExecutionModeInputTrianglesAdjacency:
      return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
   case SpvExecutionModeQuads:
      return 7; /* GL_QUADS */
   case SpvExecutionModeIsolines:
      return 0x8E7A; /* GL_ISOLINES */
   case SpvExecutionModeOutputLineStrip:
      return 3; /* GL_LINE_STRIP */
   case SpvExecutionModeOutputTriangleStrip:
      return 5; /* GL_TRIANGLE_STRIP */
   default:
      vtn_fail("Invalid primitive type: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static unsigned
vertices_in_from_spv_execution_mode(struct vtn_builder *b,
                                    SpvExecutionMode mode)
{
   switch (mode) {
   case SpvExecutionModeInputPoints:
      return 1;
   case SpvExecutionModeInputLines:
      return 2;
   case SpvExecutionModeInputLinesAdjacency:
      return 4;
   case SpvExecutionModeTriangles:
      return 3;
   case SpvExecutionModeInputTrianglesAdjacency:
      return 6;
   default:
      vtn_fail("Invalid GS input mode: %s (%u)",
               spirv_executionmode_to_string(mode), mode);
   }
}
static gl_shader_stage
stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
{
   switch (model) {
   case SpvExecutionModelVertex:
      return MESA_SHADER_VERTEX;
   case SpvExecutionModelTessellationControl:
      return MESA_SHADER_TESS_CTRL;
   case SpvExecutionModelTessellationEvaluation:
      return MESA_SHADER_TESS_EVAL;
   case SpvExecutionModelGeometry:
      return MESA_SHADER_GEOMETRY;
   case SpvExecutionModelFragment:
      return MESA_SHADER_FRAGMENT;
   case SpvExecutionModelGLCompute:
      return MESA_SHADER_COMPUTE;
   case SpvExecutionModelKernel:
      return MESA_SHADER_KERNEL;
   default:
      vtn_fail("Unsupported execution model: %s (%u)",
               spirv_executionmodel_to_string(model), model);
   }
}
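
/* Note that an unsupported capability only produces a warning, not a
 * hard failure, presumably because some modules declare capabilities
 * they never actually use; failing here would reject otherwise
 * translatable shaders.
 */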
#define spv_check_supported(name, cap) do {                 \
      if (!(b->options && b->options->caps.name))           \
         vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
                  spirv_capability_to_string(cap), cap);    \
   } while(0)
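
/* OpEntryPoint layout (per the SPIR-V spec): w[1] is the execution
 * model, w[2] the entry point id, and the literal name starts at w[3],
 * followed by the interface ids.  We only latch onto the entry point
 * whose name and stage match what the driver asked for.
 */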
static void
vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                       unsigned count)
{
   struct vtn_value *entry_point = &b->values[w[2]];
   /* Record the name regardless of whether this is the entry point the
    * driver asked for.
    */
   unsigned name_words;
   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);

   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
      return;

   vtn_assert(b->entry_point == NULL);
   b->entry_point = entry_point;
}
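
/* Preamble handler used with vtn_foreach_instruction(): returns true to
 * keep consuming preamble instructions and false at the first opcode
 * that belongs to a later section of the module.
 */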
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource: {
      const char *lang;
      switch (w[1]) {
      default:
      case SpvSourceLanguageUnknown:      lang = "unknown";    break;
      case SpvSourceLanguageESSL:         lang = "ESSL";       break;
      case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
      case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
      case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
      case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
      }

      uint32_t version = w[2];

      const char *file =
         (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";

      vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
      break;
   }

   case SpvOpSourceExtension:
   case SpvOpSourceContinued:
   case SpvOpExtension:
   case SpvOpModuleProcessed:
      /* Unhandled, but these are for debug so that's ok. */
      break;
   case SpvOpCapability: {
      SpvCapability cap = w[1];
      switch (cap) {
      case SpvCapabilityMatrix:
      case SpvCapabilityShader:
      case SpvCapabilityGeometry:
      case SpvCapabilityGeometryPointSize:
      case SpvCapabilityUniformBufferArrayDynamicIndexing:
      case SpvCapabilitySampledImageArrayDynamicIndexing:
      case SpvCapabilityStorageBufferArrayDynamicIndexing:
      case SpvCapabilityStorageImageArrayDynamicIndexing:
      case SpvCapabilityImageRect:
      case SpvCapabilitySampledRect:
      case SpvCapabilitySampled1D:
      case SpvCapabilityImage1D:
      case SpvCapabilitySampledCubeArray:
      case SpvCapabilityImageCubeArray:
      case SpvCapabilitySampledBuffer:
      case SpvCapabilityImageBuffer:
      case SpvCapabilityImageQuery:
      case SpvCapabilityDerivativeControl:
      case SpvCapabilityInterpolationFunction:
      case SpvCapabilityMultiViewport:
      case SpvCapabilitySampleRateShading:
      case SpvCapabilityClipDistance:
      case SpvCapabilityCullDistance:
      case SpvCapabilityInputAttachment:
      case SpvCapabilityImageGatherExtended:
      case SpvCapabilityStorageImageExtendedFormats:
      case SpvCapabilityVector16:
         break;

      case SpvCapabilityLinkage:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilitySparseResidency:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityMinLod:
         spv_check_supported(min_lod, cap);
         break;

      case SpvCapabilityAtomicStorage:
         spv_check_supported(atomic_storage, cap);
         break;

      case SpvCapabilityFloat64:
         spv_check_supported(float64, cap);
         break;
      case SpvCapabilityInt64:
         spv_check_supported(int64, cap);
         break;
      case SpvCapabilityInt16:
         spv_check_supported(int16, cap);
         break;
      case SpvCapabilityInt8:
         spv_check_supported(int8, cap);
         break;

      case SpvCapabilityTransformFeedback:
         spv_check_supported(transform_feedback, cap);
         break;

      case SpvCapabilityGeometryStreams:
         spv_check_supported(geometry_streams, cap);
         break;

      case SpvCapabilityInt64Atomics:
         spv_check_supported(int64_atomics, cap);
         break;

      case SpvCapabilityStorageImageMultisample:
         spv_check_supported(storage_image_ms, cap);
         break;

      case SpvCapabilityAddresses:
         spv_check_supported(address, cap);
         break;

      case SpvCapabilityKernel:
         spv_check_supported(kernel, cap);
         break;

      case SpvCapabilityImageBasic:
         spv_check_supported(kernel_image, cap);
         break;

      case SpvCapabilityImageReadWrite:
      case SpvCapabilityImageMipmap:
      case SpvCapabilityPipes:
      case SpvCapabilityDeviceEnqueue:
      case SpvCapabilityLiteralSampler:
      case SpvCapabilityGenericPointer:
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;

      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;

      case SpvCapabilityTessellation:
      case SpvCapabilityTessellationPointSize:
         spv_check_supported(tessellation, cap);
         break;

      case SpvCapabilityDrawParameters:
         spv_check_supported(draw_parameters, cap);
         break;

      case SpvCapabilityStorageImageReadWithoutFormat:
         spv_check_supported(image_read_without_format, cap);
         break;

      case SpvCapabilityStorageImageWriteWithoutFormat:
         spv_check_supported(image_write_without_format, cap);
         break;

      case SpvCapabilityDeviceGroup:
         spv_check_supported(device_group, cap);
         break;

      case SpvCapabilityMultiView:
         spv_check_supported(multiview, cap);
         break;

      case SpvCapabilityGroupNonUniform:
         spv_check_supported(subgroup_basic, cap);
         break;

      case SpvCapabilitySubgroupVoteKHR:
      case SpvCapabilityGroupNonUniformVote:
         spv_check_supported(subgroup_vote, cap);
         break;

      case SpvCapabilitySubgroupBallotKHR:
      case SpvCapabilityGroupNonUniformBallot:
         spv_check_supported(subgroup_ballot, cap);
         break;

      case SpvCapabilityGroupNonUniformShuffle:
      case SpvCapabilityGroupNonUniformShuffleRelative:
         spv_check_supported(subgroup_shuffle, cap);
         break;

      case SpvCapabilityGroupNonUniformQuad:
         spv_check_supported(subgroup_quad, cap);
         break;

      case SpvCapabilityGroupNonUniformArithmetic:
      case SpvCapabilityGroupNonUniformClustered:
         spv_check_supported(subgroup_arithmetic, cap);
         break;

      case SpvCapabilityGroups:
         spv_check_supported(amd_shader_ballot, cap);
         break;

      case SpvCapabilityVariablePointersStorageBuffer:
      case SpvCapabilityVariablePointers:
         spv_check_supported(variable_pointers, cap);
         b->variable_pointers = true;
         break;

      case SpvCapabilityStorageUniformBufferBlock16:
      case SpvCapabilityStorageUniform16:
      case SpvCapabilityStoragePushConstant16:
      case SpvCapabilityStorageInputOutput16:
         spv_check_supported(storage_16bit, cap);
         break;

      case SpvCapabilityShaderLayer:
      case SpvCapabilityShaderViewportIndex:
      case SpvCapabilityShaderViewportIndexLayerEXT:
         spv_check_supported(shader_viewport_index_layer, cap);
         break;

      case SpvCapabilityStorageBuffer8BitAccess:
      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
      case SpvCapabilityStoragePushConstant8:
         spv_check_supported(storage_8bit, cap);
         break;

      case SpvCapabilityShaderNonUniformEXT:
         spv_check_supported(descriptor_indexing, cap);
         break;

      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
         spv_check_supported(descriptor_array_dynamic_indexing, cap);
         break;

      case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
      case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
      case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
      case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
      case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
         spv_check_supported(descriptor_array_non_uniform_indexing, cap);
         break;

      case SpvCapabilityRuntimeDescriptorArrayEXT:
         spv_check_supported(runtime_descriptor_array, cap);
         break;

      case SpvCapabilityStencilExportEXT:
         spv_check_supported(stencil_export, cap);
         break;

      case SpvCapabilitySampleMaskPostDepthCoverage:
         spv_check_supported(post_depth_coverage, cap);
         break;

      case SpvCapabilityDenormFlushToZero:
      case SpvCapabilityDenormPreserve:
      case SpvCapabilitySignedZeroInfNanPreserve:
      case SpvCapabilityRoundingModeRTE:
      case SpvCapabilityRoundingModeRTZ:
         spv_check_supported(float_controls, cap);
         break;

      case SpvCapabilityPhysicalStorageBufferAddresses:
         spv_check_supported(physical_storage_buffer_address, cap);
         break;

      case SpvCapabilityComputeDerivativeGroupQuadsNV:
      case SpvCapabilityComputeDerivativeGroupLinearNV:
         spv_check_supported(derivative_group, cap);
         break;

      case SpvCapabilityFloat16:
         spv_check_supported(float16, cap);
         break;

      case SpvCapabilityFragmentShaderSampleInterlockEXT:
         spv_check_supported(fragment_shader_sample_interlock, cap);
         break;

      case SpvCapabilityFragmentShaderPixelInterlockEXT:
         spv_check_supported(fragment_shader_pixel_interlock, cap);
         break;

      case SpvCapabilityDemoteToHelperInvocationEXT:
         spv_check_supported(demote_to_helper_invocation, cap);
         break;

      case SpvCapabilityShaderClockKHR:
         spv_check_supported(shader_clock, cap);
         break;

      case SpvCapabilityVulkanMemoryModel:
         spv_check_supported(vk_memory_model, cap);
         break;

      case SpvCapabilityVulkanMemoryModelDeviceScope:
         spv_check_supported(vk_memory_model_device_scope, cap);
         break;

      case SpvCapabilityImageReadWriteLodAMD:
         spv_check_supported(amd_image_read_write_lod, cap);
         break;

      case SpvCapabilityIntegerFunctions2INTEL:
         spv_check_supported(integer_functions2, cap);
         break;

      case SpvCapabilityFragmentMaskAMD:
         spv_check_supported(amd_fragment_mask, cap);
         break;

      case SpvCapabilityImageGatherBiasLodAMD:
         spv_check_supported(amd_image_gather_bias_lod, cap);
         break;

      case SpvCapabilityAtomicFloat32AddEXT:
         spv_check_supported(float32_atomic_add, cap);
         break;

      case SpvCapabilityAtomicFloat64AddEXT:
         spv_check_supported(float64_atomic_add, cap);
         break;

      default:
         vtn_fail("Unhandled capability: %s (%u)",
                  spirv_capability_to_string(cap), cap);
      }
      break;
   }
   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      switch (w[1]) {
      case SpvAddressingModelPhysical32:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical32 only supported for kernels");
         b->shader->info.cs.ptr_size = 32;
         b->physical_ptrs = true;
         assert(nir_address_format_bit_size(b->options->global_addr_format) == 32);
         assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->shared_addr_format) == 32);
         assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
         if (!b->options->constant_as_global) {
            assert(nir_address_format_bit_size(b->options->ubo_addr_format) == 32);
            assert(nir_address_format_num_components(b->options->ubo_addr_format) == 1);
         }
         break;
      case SpvAddressingModelPhysical64:
         vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
                     "AddressingModelPhysical64 only supported for kernels");
         b->shader->info.cs.ptr_size = 64;
         b->physical_ptrs = true;
         assert(nir_address_format_bit_size(b->options->global_addr_format) == 64);
         assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
         assert(nir_address_format_bit_size(b->options->shared_addr_format) == 64);
         assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
         if (!b->options->constant_as_global) {
            assert(nir_address_format_bit_size(b->options->ubo_addr_format) == 64);
            assert(nir_address_format_num_components(b->options->ubo_addr_format) == 1);
         }
         break;
      case SpvAddressingModelLogical:
         vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
                     "AddressingModelLogical only supported for shaders");
         b->physical_ptrs = false;
         break;
      case SpvAddressingModelPhysicalStorageBuffer64:
         vtn_fail_if(!b->options ||
                     !b->options->caps.physical_storage_buffer_address,
                     "AddressingModelPhysicalStorageBuffer64 not supported");
         break;
      default:
         vtn_fail("Unknown addressing model: %s (%u)",
                  spirv_addressingmodel_to_string(w[1]), w[1]);
         break;
      }

      b->mem_model = w[2];
      switch (w[2]) {
      case SpvMemoryModelSimple:
      case SpvMemoryModelGLSL450:
      case SpvMemoryModelOpenCL:
         break;
      case SpvMemoryModelVulkan:
         vtn_fail_if(!b->options->caps.vk_memory_model,
                     "Vulkan memory model is unsupported by this driver");
         break;
      default:
         vtn_fail("Unsupported memory model: %s",
                  spirv_memorymodel_to_string(w[2]));
         break;
      }
      break;

   case SpvOpEntryPoint:
      vtn_handle_entry_point(b, w, count);
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2, NULL);
      break;
   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpExecutionMode:
   case SpvOpExecutionModeId:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      if (val->ext_handler == vtn_handle_non_semantic_instruction) {
         /* NonSemantic extended instructions are acceptable in preamble. */
         vtn_handle_non_semantic_instruction(b, w[4], w, count);
         return true;
      } else {
         return false; /* End of preamble. */
      }
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
                          const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeOriginUpperLeft:
   case SpvExecutionModeOriginLowerLeft:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.origin_upper_left =
         (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
      break;

   case SpvExecutionModeEarlyFragmentTests:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.early_fragment_tests = true;
      break;

   case SpvExecutionModePostDepthCoverage:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.post_depth_coverage = true;
      break;

   case SpvExecutionModeInvocations:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
      break;

   case SpvExecutionModeDepthReplacing:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
      break;
   case SpvExecutionModeDepthGreater:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
      break;
   case SpvExecutionModeDepthLess:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
      break;
   case SpvExecutionModeDepthUnchanged:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;
   case SpvExecutionModeLocalSize:
      vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
      b->shader->info.cs.local_size[0] = mode->operands[0];
      b->shader->info.cs.local_size[1] = mode->operands[1];
      b->shader->info.cs.local_size[2] = mode->operands[2];
      break;

   case SpvExecutionModeLocalSizeHint:
      break; /* Nothing to do with this */

   case SpvExecutionModeOutputVertices:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.tcs_vertices_out = mode->operands[0];
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_out = mode->operands[0];
      }
      break;

   case SpvExecutionModeInputPoints:
   case SpvExecutionModeInputLines:
   case SpvExecutionModeInputLinesAdjacency:
   case SpvExecutionModeTriangles:
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
      if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
          b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
         b->shader->info.tess.primitive_mode =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      } else {
         vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
         b->shader->info.gs.input_primitive =
            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      }
      break;

   case SpvExecutionModeOutputPoints:
   case SpvExecutionModeOutputLineStrip:
   case SpvExecutionModeOutputTriangleStrip:
      vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
      b->shader->info.gs.output_primitive =
         gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
      break;

   case SpvExecutionModeSpacingEqual:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
      break;
   case SpvExecutionModeSpacingFractionalEven:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
      break;
   case SpvExecutionModeSpacingFractionalOdd:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
      break;
   case SpvExecutionModeVertexOrderCw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = false;
      break;
   case SpvExecutionModeVertexOrderCcw:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.ccw = true;
      break;
   case SpvExecutionModePointMode:
      vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
                 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
      b->shader->info.tess.point_mode = true;
      break;

   case SpvExecutionModePixelCenterInteger:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_center_integer = true;
      break;

   case SpvExecutionModeXfb:
      b->shader->info.has_transform_feedback_varyings = true;
      break;

   case SpvExecutionModeVecTypeHint:
      break; /* OpenCL */

   case SpvExecutionModeContractionOff:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
                  spirv_executionmode_to_string(mode->exec_mode));
      else
         b->exact = true;
      break;
   case SpvExecutionModeStencilRefReplacingEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      break;

   case SpvExecutionModeDerivativeGroupQuadsNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
      break;

   case SpvExecutionModeDerivativeGroupLinearNV:
      vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
      b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
      break;

   case SpvExecutionModePixelInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_ordered = true;
      break;

   case SpvExecutionModePixelInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.pixel_interlock_unordered = true;
      break;

   case SpvExecutionModeSampleInterlockOrderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_ordered = true;
      break;

   case SpvExecutionModeSampleInterlockUnorderedEXT:
      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
      b->shader->info.fs.sample_interlock_unordered = true;
      break;

   case SpvExecutionModeDenormPreserve:
   case SpvExecutionModeDenormFlushToZero:
   case SpvExecutionModeSignedZeroInfNanPreserve:
   case SpvExecutionModeRoundingModeRTE:
   case SpvExecutionModeRoundingModeRTZ: {
      unsigned execution_mode = 0;
      switch (mode->exec_mode) {
      case SpvExecutionModeDenormPreserve:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeDenormFlushToZero:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeSignedZeroInfNanPreserve:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeRoundingModeRTE:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      case SpvExecutionModeRoundingModeRTZ:
         switch (mode->operands[0]) {
         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
         default: vtn_fail("Floating point type not supported");
         }
         break;
      default:
         break;
      }

      b->shader->info.float_controls_execution_mode |= execution_mode;
      break;
   }

   case SpvExecutionModeLocalSizeId:
   case SpvExecutionModeLocalSizeHintId:
      /* Handled later by vtn_handle_execution_mode_id(). */
      break;

   default:
      vtn_fail("Unhandled execution mode: %s (%u)",
               spirv_executionmode_to_string(mode->exec_mode),
               mode->exec_mode);
   }
}
static void
vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
                             const struct vtn_decoration *mode, UNUSED void *data)
{
   vtn_assert(b->entry_point == entry_point);

   switch (mode->exec_mode) {
   case SpvExecutionModeLocalSizeId:
      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
      break;

   case SpvExecutionModeLocalSizeHintId:
      /* Nothing to do with this hint. */
      break;

   default:
      /* Nothing to do.  Literal execution modes already handled by
       * vtn_handle_execution_mode().
       */
      break;
   }
}
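
/* Same callback shape as the preamble handler, but for the types,
 * constants, and module-level variables section of the binary.
 */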
static bool
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count)
{
   vtn_set_instruction_result_type(b, opcode, w, count);

   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceContinued:
   case SpvOpSourceExtension:
   case SpvOpExtension:
   case SpvOpCapability:
   case SpvOpExtInstImport:
   case SpvOpMemoryModel:
   case SpvOpEntryPoint:
   case SpvOpExecutionMode:
   case SpvOpString:
   case SpvOpName:
   case SpvOpMemberName:
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
      vtn_fail("Invalid opcode in the types and variables section");
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNull:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
   case SpvOpSpecConstantOp:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      /* NonSemantic extended instructions are acceptable in preamble, others
       * will indicate the end of preamble.
       */
      return val->ext_handler == vtn_handle_non_semantic_instruction;
   }

   default:
      return false; /* End of preamble */
   }

   return true;
}
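
/* Builds a select recursively: vectors and scalars map directly onto
 * nir_bcsel, while composites select each element with the same
 * condition.  Note that the condition (src0) is not split per element.
 */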
static struct vtn_ssa_value *
vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
               struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
{
   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = src1->type;

   if (glsl_type_is_vector_or_scalar(src1->type)) {
      dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
   } else {
      unsigned elems = glsl_get_length(src1->type);

      dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         dest->elems[i] = vtn_nir_select(b, src0,
                                         src1->elems[i], src2->elems[i]);
      }
   }

   return dest;
}
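
/* OpSelect words: w[3] is the condition, w[4] and w[5] the two objects.
 * The validation below enforces the SPIR-V rules before the actual
 * selection is built by vtn_nir_select().
 */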
static void
vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
{
   /* Handle OpSelect up-front here because it needs to be able to handle
    * pointers and not just regular vectors and scalars.
    */
   struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
   struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
   struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
   struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);

   vtn_fail_if(obj1_val->type != res_val->type ||
               obj2_val->type != res_val->type,
               "Object types must match the result type in OpSelect");

   vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
                cond_val->type->base_type != vtn_base_type_vector) ||
               !glsl_type_is_boolean(cond_val->type->type),
               "OpSelect must have either a vector of booleans or "
               "a boolean as Condition type");

   vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
               (res_val->type->base_type != vtn_base_type_vector ||
                res_val->type->length != cond_val->type->length),
               "When Condition type in OpSelect is a vector, the Result "
               "type must be a vector of the same length");

   switch (res_val->type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_struct:
      /* OK. */
      break;
   case vtn_base_type_pointer:
      /* We need to have actual storage for pointer types. */
      vtn_fail_if(res_val->type->type == NULL,
                  "Invalid pointer result type for OpSelect");
      break;
   default:
      vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
   }

   vtn_push_ssa_value(b, w[2],
                      vtn_nir_select(b, vtn_ssa_value(b, w[3]),
                                        vtn_ssa_value(b, w[4]),
                                        vtn_ssa_value(b, w[5])));
}
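
/* Example of the OpPtrDiff math below: two pointers into a float array
 * whose addresses differ by 12 bytes have elem_size == 4, so the isub
 * result of 12 is divided down to a difference of 3 elements and then
 * cast to the width of the result type.
 */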
static void
vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
   struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
   vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
               type2->base_type != vtn_base_type_pointer,
               "%s operands must have pointer types",
               spirv_op_to_string(opcode));
   vtn_fail_if(type1->storage_class != type2->storage_class,
               "%s operands must have the same storage class",
               spirv_op_to_string(opcode));

   struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
   const struct glsl_type *type = vtn_type->type;

   nir_address_format addr_format = vtn_mode_to_address_format(
      b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));

   nir_ssa_def *def;

   switch (opcode) {
   case SpvOpPtrDiff: {
      /* OpPtrDiff returns the difference in number of elements (not byte offset). */
      unsigned elem_size, elem_align;
      glsl_get_natural_size_align_bytes(type1->deref->type,
                                        &elem_size, &elem_align);

      def = nir_build_addr_isub(&b->nb,
                                vtn_get_nir_ssa(b, w[3]),
                                vtn_get_nir_ssa(b, w[4]),
                                addr_format);
      def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
      def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
      break;
   }

   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual: {
      def = nir_build_addr_ieq(&b->nb,
                               vtn_get_nir_ssa(b, w[3]),
                               vtn_get_nir_ssa(b, w[4]),
                               addr_format);
      if (opcode == SpvOpPtrNotEqual)
         def = nir_inot(&b->nb, def);
      break;
   }

   default:
      unreachable("Invalid ptr operation");
   }

   vtn_push_nir_ssa(b, w[2], def);
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel:
      break;

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
      val->type = vtn_get_type(b, w[1]);
      break;
   }

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpPtrAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpInBoundsPtrAccessChain:
   case SpvOpArrayLength:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpSampledImage:
   case SpvOpImage:
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpImageRead:
   case SpvOpImageWrite:
   case SpvOpImageTexelPointer:
      vtn_handle_image(b, opcode, w, count);
      break;

   case SpvOpImageQuerySize: {
      struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
      vtn_assert(image_type->base_type == vtn_base_type_image);
      if (glsl_type_is_image(image_type->glsl_image)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
   }

   case SpvOpFragmentMaskFetchAMD:
   case SpvOpFragmentFetchAMD:
      vtn_handle_texture(b, opcode, w, count);
      break;
   case SpvOpAtomicLoad:
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpAtomicStore: {
      struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
      if (pointer->value_type == vtn_value_type_image_pointer) {
         vtn_handle_image(b, opcode, w, count);
      } else {
         vtn_assert(pointer->value_type == vtn_value_type_pointer);
         vtn_handle_atomics(b, opcode, w, count);
      }
      break;
   }

   case SpvOpSelect:
      vtn_handle_select(b, opcode, w, count);
      break;
   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpQuantizeToF16:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSMod:
   case SpvOpFMod:
   case SpvOpSRem:
   case SpvOpFRem:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpIAddCarry:
   case SpvOpISubBorrow:
   case SpvOpUMulExtended:
   case SpvOpSMulExtended:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalOr:
   case SpvOpLogicalAnd:
   case SpvOpLogicalNot:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
   case SpvOpBitFieldInsert:
   case SpvOpBitFieldSExtract:
   case SpvOpBitFieldUExtract:
   case SpvOpBitReverse:
   case SpvOpBitCount:
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
   case SpvOpUCountLeadingZerosINTEL:
   case SpvOpUCountTrailingZerosINTEL:
   case SpvOpAbsISubINTEL:
   case SpvOpAbsUSubINTEL:
   case SpvOpIAddSatINTEL:
   case SpvOpUAddSatINTEL:
   case SpvOpIAverageINTEL:
   case SpvOpUAverageINTEL:
   case SpvOpIAverageRoundedINTEL:
   case SpvOpUAverageRoundedINTEL:
   case SpvOpISubSatINTEL:
   case SpvOpUSubSatINTEL:
   case SpvOpIMul32x16INTEL:
   case SpvOpUMul32x16INTEL:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpBitcast:
      vtn_handle_bitcast(b, w, count);
      break;
   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyLogical:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
   case SpvOpEndStreamPrimitive:
   case SpvOpControlBarrier:
   case SpvOpMemoryBarrier:
      vtn_handle_barrier(b, opcode, w, count);
      break;

   case SpvOpGroupNonUniformElect:
   case SpvOpGroupNonUniformAll:
   case SpvOpGroupNonUniformAny:
   case SpvOpGroupNonUniformAllEqual:
   case SpvOpGroupNonUniformBroadcast:
   case SpvOpGroupNonUniformBroadcastFirst:
   case SpvOpGroupNonUniformBallot:
   case SpvOpGroupNonUniformInverseBallot:
   case SpvOpGroupNonUniformBallotBitExtract:
   case SpvOpGroupNonUniformBallotBitCount:
   case SpvOpGroupNonUniformBallotFindLSB:
   case SpvOpGroupNonUniformBallotFindMSB:
   case SpvOpGroupNonUniformShuffle:
   case SpvOpGroupNonUniformShuffleXor:
   case SpvOpGroupNonUniformShuffleUp:
   case SpvOpGroupNonUniformShuffleDown:
   case SpvOpGroupNonUniformIAdd:
   case SpvOpGroupNonUniformFAdd:
   case SpvOpGroupNonUniformIMul:
   case SpvOpGroupNonUniformFMul:
   case SpvOpGroupNonUniformSMin:
   case SpvOpGroupNonUniformUMin:
   case SpvOpGroupNonUniformFMin:
   case SpvOpGroupNonUniformSMax:
   case SpvOpGroupNonUniformUMax:
   case SpvOpGroupNonUniformFMax:
   case SpvOpGroupNonUniformBitwiseAnd:
   case SpvOpGroupNonUniformBitwiseOr:
   case SpvOpGroupNonUniformBitwiseXor:
   case SpvOpGroupNonUniformLogicalAnd:
   case SpvOpGroupNonUniformLogicalOr:
   case SpvOpGroupNonUniformLogicalXor:
   case SpvOpGroupNonUniformQuadBroadcast:
   case SpvOpGroupNonUniformQuadSwap:
   case SpvOpGroupAll:
   case SpvOpGroupAny:
   case SpvOpGroupBroadcast:
   case SpvOpGroupIAdd:
   case SpvOpGroupFAdd:
   case SpvOpGroupFMin:
   case SpvOpGroupUMin:
   case SpvOpGroupSMin:
   case SpvOpGroupFMax:
   case SpvOpGroupUMax:
   case SpvOpGroupSMax:
   case SpvOpSubgroupBallotKHR:
   case SpvOpSubgroupFirstInvocationKHR:
   case SpvOpSubgroupReadInvocationKHR:
   case SpvOpSubgroupAllKHR:
   case SpvOpSubgroupAnyKHR:
   case SpvOpSubgroupAllEqualKHR:
   case SpvOpGroupIAddNonUniformAMD:
   case SpvOpGroupFAddNonUniformAMD:
   case SpvOpGroupFMinNonUniformAMD:
   case SpvOpGroupUMinNonUniformAMD:
   case SpvOpGroupSMinNonUniformAMD:
   case SpvOpGroupFMaxNonUniformAMD:
   case SpvOpGroupUMaxNonUniformAMD:
   case SpvOpGroupSMaxNonUniformAMD:
      vtn_handle_subgroup(b, opcode, w, count);
      break;
   case SpvOpPtrEqual:
   case SpvOpPtrNotEqual:
   case SpvOpPtrDiff:
      vtn_handle_ptr(b, opcode, w, count);
      break;

   case SpvOpBeginInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
      break;

   case SpvOpEndInvocationInterlockEXT:
      vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
      break;

   case SpvOpDemoteToHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
      nir_builder_instr_insert(&b->nb, &intrin->instr);
      break;
   }

   case SpvOpIsHelperInvocationEXT: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
      break;
   }

   case SpvOpReadClockKHR: {
      SpvScope scope = vtn_constant_uint(b, w[3]);
      nir_scope nir_scope;

      switch (scope) {
      case SpvScopeDevice:
         nir_scope = NIR_SCOPE_DEVICE;
         break;
      case SpvScopeSubgroup:
         nir_scope = NIR_SCOPE_SUBGROUP;
         break;
      default:
         vtn_fail("invalid read clock scope");
      }

      /* Operation supports two result types: uvec2 and uint64_t.  The NIR
       * intrinsic gives uvec2, so pack the result for the other case.
       */
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
      nir_intrinsic_set_memory_scope(intrin, nir_scope);
      nir_builder_instr_insert(&b->nb, &intrin->instr);

      struct vtn_type *type = vtn_get_type(b, w[1]);
      const struct glsl_type *dest_type = type->type;
      nir_ssa_def *result;

      if (glsl_type_is_vector(dest_type)) {
         assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
         result = &intrin->dest.ssa;
      } else {
         assert(glsl_type_is_scalar(dest_type));
         assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
         result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
      }

      vtn_push_nir_ssa(b, w[2], result);
      break;
   }

   case SpvOpLifetimeStart:
   case SpvOpLifetimeStop:
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
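
/* SPIR-V module header layout (per the spec): words[0] is the magic
 * number 0x07230203, words[1] the version (e.g. 0x00010000 for 1.0,
 * which is why anything below 0x10000 is rejected), words[2] the
 * generator magic, words[3] the id bound, and words[4] a reserved
 * schema word that must be 0.
 */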
static struct vtn_builder *
vtn_create_builder(const uint32_t *words, size_t word_count,
                   gl_shader_stage stage, const char *entry_point_name,
                   const struct spirv_to_nir_options *options)
{
   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   struct spirv_to_nir_options *dup_options =
      ralloc(b, struct spirv_to_nir_options);
   *dup_options = *options;

   b->spirv = words;
   b->spirv_word_count = word_count;
   list_inithead(&b->functions);
   b->entry_point_stage = stage;
   b->entry_point_name = entry_point_name;
   b->options = dup_options;

   /*
    * Handle the SPIR-V header (first 5 dwords).
    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
    */
   if (word_count <= 5)
      goto fail;

   if (words[0] != SpvMagicNumber) {
      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
      goto fail;
   }

   if (words[1] < 0x10000) {
      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
      goto fail;
   }

   uint16_t generator_id = words[2] >> 16;
   uint16_t generator_version = words[2];

   /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
    * to provide correct memory semantics on compute shader barrier()
    * commands.  Prior to that, we need to fix them up ourselves.  This
    * GLSLang fix caused them to bump to generator version 3.
    */
   b->wa_glslang_cs_barrier = (generator_id == 8 && generator_version < 3);

   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   if (words[4] != 0) {
      vtn_err("words[4] was %u, want 0", words[4]);
      goto fail;
   }

   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);

   return b;

 fail:
   ralloc_free(b);
   return NULL;
}
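
/* For OpenCL-style kernels with parameters, we wrap the real entry
 * point in a generated main that declares one uniform input per
 * parameter, loads (or copies, for by-value Function pointers) each
 * one, and calls the original function.
 */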
static nir_function *
vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
                                    nir_function *entry_point)
{
   vtn_assert(entry_point == b->entry_point->func->impl->function);
   vtn_fail_if(!entry_point->name, "entry points are required to have a name");
   const char *func_name =
      ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);

   /* we shouldn't have any inputs yet */
   vtn_assert(!entry_point->shader->num_inputs);
   vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);

   nir_function *main_entry_point = nir_function_create(b->shader, func_name);
   main_entry_point->impl = nir_function_impl_create(main_entry_point);
   nir_builder_init(&b->nb, main_entry_point->impl);
   b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
   b->func_param_idx = 0;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);

   for (unsigned i = 0; i < entry_point->num_params; ++i) {
      struct vtn_type *param_type = b->entry_point->func->type->params[i];

      /* consider all pointers to function memory to be parameters passed
       * by value
       */
      bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
         param_type->storage_class == SpvStorageClassFunction;

      /* input variable */
      nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
      in_var->data.mode = nir_var_uniform;
      in_var->data.read_only = true;
      in_var->data.location = i;
      if (param_type->base_type == vtn_base_type_image) {
         in_var->data.access = 0;
         if (param_type->access_qualifier & SpvAccessQualifierReadOnly)
            in_var->data.access |= ACCESS_NON_WRITEABLE;
         if (param_type->access_qualifier & SpvAccessQualifierWriteOnly)
            in_var->data.access |= ACCESS_NON_READABLE;
      }

      if (is_by_val)
         in_var->type = param_type->deref->type;
      else if (param_type->base_type == vtn_base_type_image)
         in_var->type = param_type->glsl_image;
      else if (param_type->base_type == vtn_base_type_sampler)
         in_var->type = glsl_bare_sampler_type();
      else
         in_var->type = param_type->type;

      nir_shader_add_variable(b->nb.shader, in_var);
      b->nb.shader->num_inputs++;

      /* we have to copy the entire variable into function memory */
      if (is_by_val) {
         nir_variable *copy_var =
            nir_local_variable_create(main_entry_point->impl, in_var->type,
                                      "copy_in");
         nir_copy_var(&b->nb, copy_var, in_var);
         call->params[i] =
            nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
      } else if (param_type->base_type == vtn_base_type_image ||
                 param_type->base_type == vtn_base_type_sampler) {
         /* Don't load the var, just pass a deref of it */
         call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->dest.ssa);
      } else {
         call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
      }
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   return main_entry_point;
}
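
/* Top-level entry point: walks the binary in the order the spec lays
 * it out (preamble, execution modes, types/constants/variables,
 * function bodies), then runs the small cleanup passes needed to hand
 * back valid NIR.
 */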
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             struct nir_spirv_specialization *spec, unsigned num_spec,
             gl_shader_stage stage, const char *entry_point_name,
             const struct spirv_to_nir_options *options,
             const nir_shader_compiler_options *nir_options)
{
   const uint32_t *word_end = words + word_count;

   struct vtn_builder *b = vtn_create_builder(words, word_count,
                                              stage, entry_point_name,
                                              options);
   if (b == NULL)
      return NULL;

   /* See also _vtn_fail() */
   if (setjmp(b->fail_jump)) {
      ralloc_free(b);
      return NULL;
   }

   /* Skip the SPIR-V header, handled at vtn_create_builder */
   words += 5;

   b->shader = nir_shader_create(b, stage, nir_options, NULL);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   if (b->entry_point == NULL) {
      vtn_fail("Entry point not found");
      ralloc_free(b);
      return NULL;
   }

   /* Ensure a sane address mode is being used for function temps */
   assert(nir_address_format_bit_size(b->options->temp_addr_format) == nir_get_ptr_bitsize(b->shader));
   assert(nir_address_format_num_components(b->options->temp_addr_format) == 1);

   /* Set shader info defaults */
   if (stage == MESA_SHADER_GEOMETRY)
      b->shader->info.gs.invocations = 1;

   /* Parse execution modes. */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode, NULL);

   b->specializations = spec;
   b->num_specializations = num_spec;

   /* Handle all variable, type, and constant instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_variable_or_type_instruction);

   /* Parse execution modes that depend on IDs.  Must happen after we have
    * constants parsed.
    */
   vtn_foreach_execution_mode(b, b->entry_point,
                              vtn_handle_execution_mode_id, NULL);

   if (b->workgroup_size_builtin) {
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));

      nir_const_value *const_size =
         b->workgroup_size_builtin->constant->values;

      b->shader->info.cs.local_size[0] = const_size[0].u32;
      b->shader->info.cs.local_size[1] = const_size[1].u32;
      b->shader->info.cs.local_size[2] = const_size[2].u32;
   }

   /* Set types on all vtn_values */
   vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);

   vtn_build_cfg(b, words, word_end);

   assert(b->entry_point->value_type == vtn_value_type_function);
   b->entry_point->func->referenced = true;

   bool progress;
   do {
      progress = false;
      vtn_foreach_cf_node(node, &b->functions) {
         struct vtn_function *func = vtn_cf_node_as_function(node);
         if (func->referenced && !func->emitted) {
            b->const_table = _mesa_pointer_hash_table_create(b);

            vtn_function_emit(b, func, vtn_handle_body_instruction);
            progress = true;
         }
      }
   } while (progress);

   vtn_assert(b->entry_point->value_type == vtn_value_type_function);
   nir_function *entry_point = b->entry_point->func->impl->function;
   vtn_assert(entry_point);

   /* post process entry_points with input params */
   if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);

   /* structurize the CFG */
   nir_lower_goto_ifs(b->shader);

   entry_point->is_entrypoint = true;

   /* When multiple shader stages exist in the same SPIR-V module, we
    * generate input and output variables for every stage, in the same
    * NIR program.  These dead variables can be invalid NIR.  For example,
    * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
    * VS output variables wouldn't be.
    *
    * To ensure we have valid NIR, we eliminate any dead inputs and outputs
    * right away.  In order to do so, we must lower any constant initializers
    * on outputs so nir_remove_dead_variables sees that they're written to.
    */
   nir_lower_variable_initializers(b->shader, nir_var_shader_out);
   nir_remove_dead_variables(b->shader,
                             nir_var_shader_in | nir_var_shader_out, NULL);

   /* We sometimes generate bogus derefs that, while never used, give the
    * validator a bit of heartburn.  Run dead code to get rid of them.
    */
   nir_opt_dce(b->shader);

   /* Unparent the shader from the vtn_builder before we delete the builder */
   ralloc_steal(NULL, b->shader);

   nir_shader *shader = b->shader;
   ralloc_free(b);

   return shader;
}