/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

#include "util/format/u_format.h"
#include "util/u_math.h"

#include <stdio.h>
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
}
void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   char *msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}
static void
vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
{
   static int idx = 0;

   char filename[1024];
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))
      return;

   FILE *f = fopen(filename, "w");
   if (f == NULL)
      return;

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
   fclose(f);

   vtn_info("SPIR-V shader dumped to %s", filename);
}
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);
   va_end(args);
}

void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);
   va_end(args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
   if (dump_path)
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);
}
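
/* Note on error handling: spirv_to_nir() arms b->fail_jump with setjmp()
 * before it starts parsing, so the longjmp() above unwinds the whole
 * translation from whatever depth vtn_fail() was reached at, and the caller
 * can then release everything that was ralloc'ed against the builder in one
 * go.
 */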
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(type);
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader, num_components, bit_size);

      memcpy(load->value, constant->values,
             sizeof(nir_const_value) * num_components);

      nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
      val->def = &load->def;
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                                elem_type);
         }
      }
   }

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer: {
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;
   }

   default:
      vtn_fail("Invalid type for an SSA value");
   }
}
struct vtn_value *
vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                   struct vtn_ssa_value *ssa)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);

   /* See vtn_create_ssa_value */
   vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
               "Type mismatch for SPIR-V SSA value");

   struct vtn_value *val;
   if (type->base_type == vtn_base_type_pointer) {
      val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
   } else {
      /* Don't trip the value_type_ssa check in vtn_push_value */
      val = vtn_push_value(b, value_id, vtn_value_type_invalid);
      val->value_type = vtn_value_type_ssa;
      val->ssa = ssa;
   }

   return val;
}
nir_ssa_def *
vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
   vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
               "Expected a vector or scalar type");
   return ssa->def;
}

struct vtn_value *
vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
{
   /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
    * type will be valid by the time we get here.
    */
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
               def->bit_size != glsl_get_bit_size(type->type),
               "Mismatch between NIR and SPIR-V type.");
   struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
   ssa->def = def;
   return vtn_push_ssa_value(b, value_id, ssa);
}
static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, type->glsl_image, 0);
}

static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
               nir_deref_instr *deref)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_image);
   vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
}

static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampler);
   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
                               nir_var_uniform, glsl_bare_sampler_type(), 0);
}
static nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                             struct vtn_sampled_image si)
{
   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}

static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
                       struct vtn_sampled_image si)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
}

static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_type *type = vtn_get_value_type(b, value_id);
   vtn_assert(type->base_type == vtn_base_type_sampled_image);
   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);

   struct vtn_sampled_image si = { NULL, };
   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
                                   nir_var_uniform,
                                   type->image->glsl_image, 0);
   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
                                     nir_var_uniform,
                                     glsl_bare_sampler_type(), 0);
   return si;
}
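
/* The vec2 packing produced by vtn_sampled_image_to_nir_ssa() and unpacked in
 * vtn_get_sampled_image() has to line up with the glsl_vector_type(..., 2)
 * chosen for OpTypeSampledImage further down in this file: component 0
 * carries the image deref and component 1 the sampler deref.
 */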
char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
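
/* For example, the literal "main" (four characters plus the terminating NUL,
 * i.e. five bytes) occupies DIV_ROUND_UP(5, 4) = 2 words; SPIR-V requires the
 * unused bytes of the final word to be zero.
 */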
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   return w;
}
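
/* Each SPIR-V instruction packs its opcode into the low 16 bits of its first
 * word and the total word count (including that first word) into the high 16
 * bits; that is what the SpvOpCodeMask / SpvWordCountShift arithmetic above
 * decodes.  For example, the word 0x0004003d is an OpLoad (61) spanning four
 * words.
 */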
static bool
vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *w, unsigned count)
{
   /* Do nothing. */
   return true;
}
static bool
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   const char *ext = (const char *)&w[2];
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                 && (b->options && b->options->caps.amd_gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
                 && (b->options && b->options->caps.amd_shader_ballot)) {
         val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                 && (b->options && b->options->caps.amd_trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
                 && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
         val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
      } else if (strcmp(ext, "OpenCL.std") == 0) {
         val->ext_handler = vtn_handle_opencl_instruction;
      } else if (strstr(ext, "NonSemantic.") == ext) {
         val->ext_handler = vtn_handle_non_semantic_instruction;
      } else {
         vtn_fail("Unsupported extension: %s", ext);
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      vtn_assert(handled);
      break;
   }

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   return true;
}
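
/* For reference, the two opcodes handled above look like this in SPIR-V
 * assembly:
 *
 *    %glsl = OpExtInstImport "GLSL.std.450"
 *    %y    = OpExtInst %float %glsl Sqrt %x
 *
 * OpExtInstImport selects the ext_handler, and every later OpExtInst that
 * names the imported set is dispatched to that handler with the set-specific
 * opcode taken from w[4].
 */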
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
      } else {
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpDecorateId:
   case SpvOpMemberDecorate:
   case SpvOpDecorateString:
   case SpvOpMemberDecorateString:
   case SpvOpExecutionMode:
   case SpvOpExecutionModeId: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
      case SpvOpDecorateId:
      case SpvOpDecorateString:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateString:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");
         break;
      case SpvOpExecutionMode:
      case SpvOpExecutionModeId:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->operands = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
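
/* The dec->scope encoding above lets a single decoration list on a vtn_value
 * carry plain decorations, execution modes, and per-member decorations at the
 * same time: member decorations store the member index biased by
 * VTN_DEC_STRUCT_MEMBER0, and _foreach_decoration_helper() undoes the bias
 * when it walks the list.
 */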
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/**
 * Returns true if the given type contains a struct decorated Block or
 * BufferBlock
 */
static bool
vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
      return vtn_type_contains_block(b, type->array_element);
   case vtn_base_type_struct:
      if (type->block || type->buffer_block)
         return true;
      for (unsigned i = 0; i < type->length; i++) {
         if (vtn_type_contains_block(b, type->members[i]))
            return true;
      }
      return false;
   default:
      return false;
   }
}
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
bool
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
{
   if (t1->id == t2->id)
      return true;

   if (t1->base_type != t2->base_type)
      return false;

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)
         return false;

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
            return false;
      }
      return true;

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types.  Just require them to be identical.
       */
      return false;
   }

   vtn_fail("Invalid base type");
}
struct vtn_type *
vtn_type_without_array(struct vtn_type *type)
{
   while (type->base_type == vtn_base_type_array)
      type = type->array_element;

   return type;
}
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}
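
/* The copy is "shallow" in that any referenced vtn_types stay shared, but the
 * members/offsets/params arrays are duplicated so that callers such as
 * mutable_matrix_member() can rewrite a single entry without disturbing other
 * values that still point at the original type.
 */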
static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (!glsl_type_is_array(array_type))
      return type;

   const struct glsl_type *elem_type =
      wrap_type_in_array(type, glsl_get_array_element(array_type));
   return glsl_array_type(elem_type, glsl_get_length(array_type),
                          glsl_get_explicit_stride(array_type));
}
static bool
vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
{
   /* For OpenCL we never want to strip the info from the types, and it makes
    * type comparisons easier in later stages.
    */
   if (b->options->environment == NIR_SPIRV_OPENCL)
      return true;

   switch (mode) {
   case vtn_variable_mode_input:
   case vtn_variable_mode_output:
      /* Layout decorations kept because we need offsets for XFB arrays of
       * blocks.
       */
      return b->shader->info.has_transform_feedback_varyings;

   case vtn_variable_mode_ssbo:
   case vtn_variable_mode_phys_ssbo:
   case vtn_variable_mode_ubo:
      return true;

   default:
      return false;
   }
}
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators perform type deduplication.  Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}
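
/* In other words: UBO/SSBO-style modes, anything captured by transform
 * feedback, and OpenCL keep their explicit offsets and strides, while every
 * other mode hands NIR the bare glsl_type so that otherwise-identical types
 * can deduplicate.
 */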
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   vtn_assert(glsl_type_is_matrix(type->type));

   return type;
}
static void
vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;
}
static void
array_stride_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *void_ctx)
{
   struct vtn_type *type = val->type;

   if (dec->decoration == SpvDecorationArrayStride) {
      if (vtn_type_contains_block(b, type)) {
         vtn_warn("The ArrayStride decoration cannot be applied to an array "
                  "type which contains a structure type decorated Block "
                  "or BufferBlock");
         /* Ignore the decoration */
      } else {
         vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
         type->stride = dec->operands[0];
      }
   }
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            UNUSED struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
      break;
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
      break;
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
      break;
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationExplicitInterpAMD:
      ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->operands[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->operands[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->operands[0];
      ctx->fields[member].offset = dec->operands[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      /* This is handled later by var_decoration_cb in vtn_variables.c */
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         ctx->type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      if (b->shader->info.stage != MESA_SHADER_KERNEL) {
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      }
      break;

   case SpvDecorationUserSemantic:
   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
/** Chases the array type all the way down to the tail and rewrites the
 * glsl_types to be based off the tail's glsl_type.
 */
static void
vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
{
   if (type->base_type != vtn_base_type_array)
      return;

   vtn_array_type_rewrite_glsl_type(type->array_element);

   type->type = glsl_array_type(type->array_element->type,
                                type->length, type->stride);
}
/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               UNUSED struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "
               "of OpTypeStruct");
   vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], true);
      mat_type->array_element->type = glsl_get_column_type(mat_type->type);
   } else {
      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->operands[0];

      mat_type->type = glsl_explicit_matrix_type(mat_type->type,
                                                 dec->operands[0], false);
   }

   /* Now that we've replaced the glsl_type with a properly strided matrix
    * type, rewrite the member type so that it's an array of the proper kind
    * of glsl_type.
    */
   vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
   ctx->fields[member].type = ctx->type->members[member]->type;
}
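
/* Note the asymmetry above: for a column-major matrix the declared
 * MatrixStride is the distance between columns and is stored on the matrix
 * type directly, while for a row-major matrix it describes the row pitch, so
 * it is pushed down onto the vector element type and the matrix inherits the
 * old per-element stride instead.
 */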
static void
struct_block_decoration_cb(struct vtn_builder *b,
                           struct vtn_value *val, int member,
                           const struct vtn_decoration *dec, void *ctx)
{
   if (member != -1)
      return;

   struct vtn_type *type = val->type;
   if (dec->decoration == SpvDecorationBlock)
      type->block = true;
   else if (dec->decoration == SpvDecorationBufferBlock)
      type->buffer_block = true;
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, UNUSED void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1) {
      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);
      return;
   }

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      break;
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->block);
      break;
   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      vtn_assert(type->buffer_block);
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationExplicitInterpAMD:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationUniformId:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationUserSemantic:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationStream:
      /* We don't need to do anything here, as stream is filled up when
       * applying the decoration to a variable, just check that if it is not a
       * struct member, it should be a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
      if (b->shader->info.stage != MESA_SHADER_KERNEL)
         vtn_warn("Decoration only allowed for CL-style kernels: %s",
                  spirv_decoration_to_string(dec->decoration));
      else
         type->packed = true;
      break;

   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationUserTypeGOOGLE:
      /* User semantic decorations can safely be ignored by the driver. */
      break;

   default:
      vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
   }
}
static enum pipe_format
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
   case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
   case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
   case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
   case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
   case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
   case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
   case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
   case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
   case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
   case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
   case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
   case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
   case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
   case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
   case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
   case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
   case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
   case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
   case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
   case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
   case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
   case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
   case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
   case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
   case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
   case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
   case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
   case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
   case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
   case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
   case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
   case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
   case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
   case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
   case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
   case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
   case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
   case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
   case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
   default:
      vtn_fail("Invalid image format: %s (%u)",
               spirv_imageformat_to_string(format), format);
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = NULL;

   /* In order to properly handle forward declarations, we have to defer
    * allocation for pointer types.
    */
   if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
      val = vtn_push_value(b, w[1], vtn_value_type_type);
      vtn_fail_if(val->type != NULL,
                  "Only pointers can have forward declarations");
      val->type = rzalloc(b, struct vtn_type);
      val->type->id = w[1];
   }

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 64:
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         break;
      case 32:
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         break;
      case 16:
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
      case 8:
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         break;
      default:
         vtn_fail("Invalid int bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      switch (bit_size) {
      case 16:
         val->type->type = glsl_float16_t_type();
         break;
      case 32:
         val->type->type = glsl_float_type();
         break;
      case 64:
         val->type->type = glsl_double_type();
         break;
      default:
         vtn_fail("Invalid float bit size: %u", bit_size);
      }
      val->type->length = 1;
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_type_is_boolean(val->type->type)
         ? 4 : glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_get_type(b, w[2]);
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element = vtn_get_type(b, w[2]);

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length = vtn_constant_uint(b, w[3]);
      }

      val->type->base_type = vtn_base_type_array;
      val->type->array_element = array_element;
      if (b->shader->info.stage == MESA_SHADER_KERNEL)
         val->type->stride = glsl_get_cl_size(array_element->type);

      vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
      val->type->type = glsl_array_type(array_element->type, val->type->length,
                                        val->type->stride);
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);
      val->type->packed = false;

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] = vtn_get_type(b, w[i + 2]);
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
            .offset = -1,
         };
      }

      if (b->shader->info.stage == MESA_SHADER_KERNEL) {
         unsigned offset = 0;
         for (unsigned i = 0; i < num_fields; i++) {
            offset = align(offset, glsl_get_cl_alignment(fields[i].type));
            fields[i].offset = offset;
            offset += glsl_get_cl_size(fields[i].type);
         }
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);

      const char *name = val->name;

      if (val->type->block || val->type->buffer_block) {
         /* Packing will be ignored since types coming from SPIR-V are
          * explicitly laid out.
          */
         val->type->type = glsl_interface_type(fields, num_fields,
                                               /* packing */ 0, false,
                                               name ? name : "block");
      } else {
         val->type->type = glsl_struct_type(fields, num_fields,
                                            name ? name : "struct", false);
      }
      break;
   }
   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_get_type(b, w[2]);

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] = vtn_get_type(b, w[i + 3]);
      }
      break;
   }

   case SpvOpTypePointer:
   case SpvOpTypeForwardPointer: {
      /* We can't blindly push the value because it might be a forward
       * declaration.
       */
      val = vtn_untyped_value(b, w[1]);

      SpvStorageClass storage_class = w[2];

      if (val->value_type == vtn_value_type_invalid) {
         val->value_type = vtn_value_type_type;
         val->type = rzalloc(b, struct vtn_type);
         val->type->id = w[1];
         val->type->base_type = vtn_base_type_pointer;
         val->type->storage_class = storage_class;

         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         enum vtn_variable_mode mode = vtn_storage_class_to_mode(
            b, storage_class, NULL, NULL);
         val->type->type = nir_address_format_to_glsl_type(
            vtn_mode_to_address_format(b, mode));
      } else {
         vtn_fail_if(val->type->storage_class != storage_class,
                     "The storage classes of an OpTypePointer and any "
                     "OpTypeForwardPointers that provide forward "
                     "declarations of it must match.");
      }

      if (opcode == SpvOpTypePointer) {
         vtn_fail_if(val->type->deref != NULL,
                     "While OpTypeForwardPointer can be used to provide a "
                     "forward declaration of a pointer, OpTypePointer can "
                     "only be used once for a given id.");

         val->type->deref = vtn_get_type(b, w[3]);

         /* Only certain storage classes use ArrayStride.  The others (in
          * particular Workgroup) are expected to be laid out by the driver.
          */
         switch (storage_class) {
         case SpvStorageClassUniform:
         case SpvStorageClassPushConstant:
         case SpvStorageClassStorageBuffer:
         case SpvStorageClassPhysicalStorageBuffer:
            vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
            break;
         default:
            /* Nothing to do. */
            break;
         }

         if (b->physical_ptrs) {
            switch (storage_class) {
            case SpvStorageClassFunction:
            case SpvStorageClassWorkgroup:
            case SpvStorageClassCrossWorkgroup:
            case SpvStorageClassUniformConstant:
               val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
                                         glsl_get_cl_alignment(val->type->deref->type));
               break;
            default:
               break;
            }
         }
      }
      break;
   }
   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      /* Images are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));

      const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
                  spirv_dim_to_string((SpvDim)w[3]), w[3]);
      }

      /* w[4]: as per Vulkan spec "Validation Rules within a Module",
       * The “Depth” operand of OpTypeImage is ignored.
       */
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            vtn_fail("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
      if (sampled == 1) {
         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
                                                   sampled_base_type);
      } else if (sampled == 2) {
         val->type->glsl_image = glsl_image_type(dim, is_array,
                                                 sampled_base_type);
      } else {
         vtn_fail("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage: {
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_get_type(b, w[2]);

      /* Sampled images are represented NIR as a vec2 SSA value where each
       * component is the result of a deref instruction.  The first component
       * is the image and the second is the sampler.  An OpLoad on an
       * OpTypeSampledImage pointer from UniformConstant memory just takes
       * the NIR deref from the pointer and duplicates it to both vector
       * components.
       */
      nir_address_format addr_format =
         vtn_mode_to_address_format(b, vtn_variable_mode_function);
      assert(nir_address_format_num_components(addr_format) == 1);
      unsigned bit_size = nir_address_format_bit_size(addr_format);
      assert(bit_size == 32 || bit_size == 64);

      enum glsl_base_type base_type =
         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
      val->type->type = glsl_vector_type(base_type, 2);
      break;
   }

   case SpvOpTypeSampler:
      val->type->base_type = vtn_base_type_sampler;

      /* Samplers are represented in NIR as a scalar SSA value that is the
       * result of a deref instruction.  An OpLoad on an OpTypeSampler pointer
       * from UniformConstant memory just takes the NIR deref from the pointer
       * and turns it into an SSA value.
       */
      val->type->type = nir_address_format_to_glsl_type(
         vtn_mode_to_address_format(b, vtn_variable_mode_function));
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

   if (val->type->base_type == vtn_base_type_struct &&
       (val->type->block || val->type->buffer_block)) {
      for (unsigned i = 0; i < val->type->length; i++) {
         vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
                     "Block and BufferBlock decorations cannot decorate a "
                     "structure type that is nested at any level inside "
                     "another structure type decorated with Block or "
                     "BufferBlock.");
      }
   }
}
static nir_constant *
vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   switch (type->base_type) {
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case vtn_base_type_pointer: {
      enum vtn_variable_mode mode = vtn_storage_class_to_mode(
         b, type->storage_class, type->deref, NULL);
      nir_address_format addr_format = vtn_mode_to_address_format(b, mode);

      const nir_const_value *null_value = nir_address_format_null_value(addr_format);
      memcpy(c->values, null_value,
             sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
      break;
   }

   case vtn_base_type_void:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
   case vtn_base_type_function:
      /* For those we have to return something but it doesn't matter what. */
      break;

   case vtn_base_type_matrix:
   case vtn_base_type_array:
      vtn_assert(type->length > 0);
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, type->array_element);
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case vtn_base_type_struct:
      c->num_elements = type->length;
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);
      for (unsigned i = 0; i < c->num_elements; i++)
         c->elements[i] = vtn_null_constant(b, type->members[i]);
      break;

   default:
      vtn_fail("Invalid type for null constant");
   }

   return c;
}
static void
spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
                            ASSERTED int member,
                            const struct vtn_decoration *dec, void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   nir_const_value *value = data;
   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->operands[0]) {
         *value = b->specializations[i].value;
         return;
      }
   }
}

static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    ASSERTED int member,
                                    const struct vtn_decoration *dec,
                                    UNUSED void *data)
{
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->operands[0] != SpvBuiltInWorkgroupSize)
      return;

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
   b->workgroup_size_builtin = val;
}
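
/* Specialization works by id matching: the driver supplies an array of
 * {id, value} pairs in b->specializations, and any constant decorated
 * SpecId n takes the value provided for id n, falling back to the default
 * encoded in the module when no entry matches.
 */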
1777 vtn_handle_constant(struct vtn_builder
*b
, SpvOp opcode
,
1778 const uint32_t *w
, unsigned count
)
1780 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_constant
);
1781 val
->constant
= rzalloc(b
, nir_constant
);
1783 case SpvOpConstantTrue
:
1784 case SpvOpConstantFalse
:
1785 case SpvOpSpecConstantTrue
:
1786 case SpvOpSpecConstantFalse
: {
1787 vtn_fail_if(val
->type
->type
!= glsl_bool_type(),
1788 "Result type of %s must be OpTypeBool",
1789 spirv_op_to_string(opcode
));
1791 bool bval
= (opcode
== SpvOpConstantTrue
||
1792 opcode
== SpvOpSpecConstantTrue
);
1794 nir_const_value u32val
= nir_const_value_for_uint(bval
, 32);
1796 if (opcode
== SpvOpSpecConstantTrue
||
1797 opcode
== SpvOpSpecConstantFalse
)
1798 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
, &u32val
);
1800 val
->constant
->values
[0].b
= u32val
.u32
!= 0;
1805 case SpvOpSpecConstant
: {
1806 vtn_fail_if(val
->type
->base_type
!= vtn_base_type_scalar
,
1807 "Result type of %s must be a scalar",
1808 spirv_op_to_string(opcode
));
1809 int bit_size
= glsl_get_bit_size(val
->type
->type
);
1812 val
->constant
->values
[0].u64
= vtn_u64_literal(&w
[3]);
1815 val
->constant
->values
[0].u32
= w
[3];
1818 val
->constant
->values
[0].u16
= w
[3];
1821 val
->constant
->values
[0].u8
= w
[3];
1824 vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size
);
1827 if (opcode
== SpvOpSpecConstant
)
1828 vtn_foreach_decoration(b
, val
, spec_constant_decoration_cb
,
1829 &val
->constant
->values
[0]);
1833 case SpvOpSpecConstantComposite
:
1834 case SpvOpConstantComposite
: {
1835 unsigned elem_count
= count
- 3;
1836 vtn_fail_if(elem_count
!= val
->type
->length
,
1837 "%s has %u constituents, expected %u",
1838 spirv_op_to_string(opcode
), elem_count
, val
->type
->length
);
1840 nir_constant
**elems
= ralloc_array(b
, nir_constant
*, elem_count
);
1841 for (unsigned i
= 0; i
< elem_count
; i
++) {
1842 struct vtn_value
*val
= vtn_untyped_value(b
, w
[i
+ 3]);
1844 if (val
->value_type
== vtn_value_type_constant
) {
1845 elems
[i
] = val
->constant
;
1847 vtn_fail_if(val
->value_type
!= vtn_value_type_undef
,
1848 "only constants or undefs allowed for "
1849 "SpvOpConstantComposite");
1850 /* to make it easier, just insert a NULL constant for now */
1851 elems
[i
] = vtn_null_constant(b
, val
->type
);
1855 switch (val
->type
->base_type
) {
1856 case vtn_base_type_vector
: {
1857 assert(glsl_type_is_vector(val
->type
->type
));
1858 for (unsigned i
= 0; i
< elem_count
; i
++)
1859 val
->constant
->values
[i
] = elems
[i
]->values
[0];
1863 case vtn_base_type_matrix
:
1864 case vtn_base_type_struct
:
1865 case vtn_base_type_array
:
1866 ralloc_steal(val
->constant
, elems
);
1867 val
->constant
->num_elements
= elem_count
;
1868 val
->constant
->elements
= elems
;
1872 vtn_fail("Result type of %s must be a composite type",
1873 spirv_op_to_string(opcode
));
   case SpvOpSpecConstantOp: {
      nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
      vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
      SpvOp opcode = u32op.u32;
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
         nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];

         if (v0->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len0; i++)
               combined[i] = v0->constant->values[i];
         }
         if (v1->value_type == vtn_value_type_constant) {
            for (unsigned i = 0; i < len1; i++)
               combined[len0 + i] = v1->constant->values[i];
         }

         for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
            uint32_t comp = w[i + 6];
            if (comp == (uint32_t)-1) {
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               val->constant->values[j] = undef;
            } else {
               vtn_fail_if(comp >= len0 + len1,
                           "All Component literals must either be FFFFFFFF "
                           "or in [0, N - 1] (inclusive).");
               val->constant->values[j] = combined[comp];
            }
         }
         break;
      }

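      /* For OpCompositeExtract/OpCompositeInsert below, c walks the
       * nir_constant tree for every aggregate (array/matrix/struct) index,
       * while elem is only set when the final index lands inside a vector;
       * in that case the per-component values[] are copied directly.
       */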
      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               elem = w[i];
               type = type->array_element;
               break;

            case vtn_base_type_matrix:
            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;
               break;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];
               break;

            default:
               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  val->constant->values[i] = (*c)->values[elem + i];
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = type->length;
               for (unsigned i = 0; i < num_components; i++)
                  (*c)->values[elem + i] = insert->constant->values[i];
            }
         }
         break;
      }

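      /* Every other OpSpecConstantOp is an ALU-style operation: it is mapped
       * to a nir_op and folded right here at parse time with
       * nir_eval_const_opcode().  For example, OpSpecConstantOp IAdd maps to
       * nir_op_iadd and its result is computed immediately.
       */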
      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
         nir_alu_type src_alu_type = dst_alu_type;
         unsigned num_components = glsl_get_vector_elements(val->type->type);
         unsigned bit_size;

         vtn_assert(count <= 7);

         switch (opcode) {
         case SpvOpSConvert:
         case SpvOpFConvert:
         case SpvOpUConvert:
            /* We have a source in a conversion */
            src_alu_type =
               nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
            /* We use the bitsize of the conversion source to evaluate the opcode later */
            bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
            break;
         default:
            bit_size = glsl_get_bit_size(val->type->type);
         }

         nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                     nir_alu_type_get_type_size(src_alu_type),
                                                     nir_alu_type_get_type_size(dst_alu_type));
         nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];

         for (unsigned i = 0; i < count - 4; i++) {
            struct vtn_value *src_val =
               vtn_value(b, w[4 + i], vtn_value_type_constant);

            /* If this is an unsized source, pull the bit size from the
             * source; otherwise, we'll use the bit size from the destination.
             */
            if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
               bit_size = glsl_get_bit_size(src_val->type->type);

            unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
                                 nir_op_infos[op].input_sizes[i] :
                                 num_components;

            unsigned j = swap ? 1 - i : i;
            for (unsigned c = 0; c < src_comps; c++)
               src[j][c] = src_val->constant->values[c];
         }

         /* fix up fixed size sources */
         switch (op) {
         case nir_op_ishl:
         case nir_op_ishr:
         case nir_op_ushr: {
            if (bit_size == 32)
               break;
            for (unsigned i = 0; i < num_components; ++i) {
               switch (bit_size) {
               case 64: src[1][i].u32 = src[1][i].u64; break;
               case 16: src[1][i].u32 = src[1][i].u16; break;
               case  8: src[1][i].u32 = src[1][i].u8;  break;
               }
            }
            break;
         }
         default:
            break;
         }

         nir_const_value *srcs[3] = {
            src[0], src[1], src[2],
         };
         nir_eval_const_opcode(op, val->constant->values,
                               num_components, bit_size, srcs,
                               b->shader->info.float_controls_execution_mode);
         break;
      }
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type);
      break;

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

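/* Map a storage class to the memory-semantics bits that cover accesses made
 * through it; storage classes with no cross-invocation visibility map to
 * SpvMemorySemanticsMaskNone.
 */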
SpvMemorySemanticsMask
vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
{
   switch (sc) {
   case SpvStorageClassStorageBuffer:
   case SpvStorageClassPhysicalStorageBuffer:
      return SpvMemorySemanticsUniformMemoryMask;
   case SpvStorageClassWorkgroup:
      return SpvMemorySemanticsWorkgroupMemoryMask;
   default:
      return SpvMemorySemanticsMaskNone;
   }
}

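/* For example, an atomic carrying AcquireRelease | WorkgroupMemory semantics
 * is split into a Release | WorkgroupMemory barrier emitted before the
 * operation and an Acquire | WorkgroupMemory barrier emitted after it.
 */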
static void
vtn_split_barrier_semantics(struct vtn_builder *b,
                            SpvMemorySemanticsMask semantics,
                            SpvMemorySemanticsMask *before,
                            SpvMemorySemanticsMask *after)
{
   /* For memory semantics embedded in operations, we split them into up to
    * two barriers, to be added before and after the operation.  This is less
    * strict than if we propagated until the final backend stage, but still
    * results in correct execution.
    *
    * A further improvement could be to pipe this information (and use it!)
    * into the next compiler layers, at the expense of making the handling of
    * barriers more complicated.
    */

   *before = SpvMemorySemanticsMaskNone;
   *after = SpvMemorySemanticsMaskNone;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   const SpvMemorySemanticsMask av_vis_semantics =
      semantics & (SpvMemorySemanticsMakeAvailableMask |
                   SpvMemorySemanticsMakeVisibleMask);

   const SpvMemorySemanticsMask storage_semantics =
      semantics & (SpvMemorySemanticsUniformMemoryMask |
                   SpvMemorySemanticsSubgroupMemoryMask |
                   SpvMemorySemanticsWorkgroupMemoryMask |
                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
                   SpvMemorySemanticsAtomicCounterMemoryMask |
                   SpvMemorySemanticsImageMemoryMask |
                   SpvMemorySemanticsOutputMemoryMask);

   const SpvMemorySemanticsMask other_semantics =
      semantics & ~(order_semantics | av_vis_semantics | storage_semantics);

   if (other_semantics)
      vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);

   /* SequentiallyConsistent is treated as AcquireRelease. */

   /* The RELEASE barrier happens BEFORE the operation, and it is usually
    * associated with a Store.  All the write operations with a matching
    * semantics will not be reordered after the Store.
    */
   if (order_semantics & (SpvMemorySemanticsReleaseMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
   }

   /* The ACQUIRE barrier happens AFTER the operation, and it is usually
    * associated with a Load.  All the operations with a matching semantics
    * will not be reordered before the Load.
    */
   if (order_semantics & (SpvMemorySemanticsAcquireMask |
                          SpvMemorySemanticsAcquireReleaseMask |
                          SpvMemorySemanticsSequentiallyConsistentMask)) {
      *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
   }

   if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
      *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;

   if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
      *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}

static nir_memory_semantics
vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
                                       SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics = 0;

   SpvMemorySemanticsMask order_semantics =
      semantics & (SpvMemorySemanticsAcquireMask |
                   SpvMemorySemanticsReleaseMask |
                   SpvMemorySemanticsAcquireReleaseMask |
                   SpvMemorySemanticsSequentiallyConsistentMask);

   if (util_bitcount(order_semantics) > 1) {
      /* Old GLSLang versions incorrectly set all the ordering bits.  This was
       * fixed in c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of glslang repo,
       * and it is in GLSLang since revision "SPIRV99.1321" (from Jul-2016).
       */
      vtn_warn("Multiple memory ordering semantics bits specified, "
               "assuming AcquireRelease.");
      order_semantics = SpvMemorySemanticsAcquireReleaseMask;
   }

   switch (order_semantics) {
   case 0:
      /* Not an ordering barrier. */
      break;

   case SpvMemorySemanticsAcquireMask:
      nir_semantics = NIR_MEMORY_ACQUIRE;
      break;

   case SpvMemorySemanticsReleaseMask:
      nir_semantics = NIR_MEMORY_RELEASE;
      break;

   case SpvMemorySemanticsSequentiallyConsistentMask:
      /* Fall through.  Treated as AcquireRelease in Vulkan. */
   case SpvMemorySemanticsAcquireReleaseMask:
      nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
      break;

   default:
      unreachable("Invalid memory order semantics");
   }

   if (semantics & SpvMemorySemanticsMakeAvailableMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeAvailable memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
   }

   if (semantics & SpvMemorySemanticsMakeVisibleMask) {
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use MakeVisible memory semantics the VulkanMemoryModel "
                  "capability must be declared.");
      nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
   }

   return nir_semantics;
}

static nir_variable_mode
vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
                                  SpvMemorySemanticsMask semantics)
{
   /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
    * and AtomicCounterMemory are ignored".
    */
   semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
                  SpvMemorySemanticsCrossWorkgroupMemoryMask |
                  SpvMemorySemanticsAtomicCounterMemoryMask);

   /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
    * for SpvMemorySemanticsImageMemoryMask.
    */

   nir_variable_mode modes = 0;
   if (semantics & (SpvMemorySemanticsUniformMemoryMask |
                    SpvMemorySemanticsImageMemoryMask)) {
      modes |= nir_var_uniform |
               nir_var_mem_ubo |
               nir_var_mem_ssbo |
               nir_var_mem_global;
   }
   if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
      modes |= nir_var_mem_shared;
   if (semantics & SpvMemorySemanticsOutputMemoryMask) {
      modes |= nir_var_shader_out;
   }

   return modes;
}

static nir_scope
vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
{
   nir_scope nir_scope;
   switch (scope) {
   case SpvScopeDevice:
      vtn_fail_if(b->options->caps.vk_memory_model &&
                  !b->options->caps.vk_memory_model_device_scope,
                  "If the Vulkan memory model is declared and any instruction "
                  "uses Device scope, the VulkanMemoryModelDeviceScope "
                  "capability must be declared.");
      nir_scope = NIR_SCOPE_DEVICE;
      break;

   case SpvScopeQueueFamily:
      vtn_fail_if(!b->options->caps.vk_memory_model,
                  "To use Queue Family scope, the VulkanMemoryModel capability "
                  "must be declared.");
      nir_scope = NIR_SCOPE_QUEUE_FAMILY;
      break;

   case SpvScopeWorkgroup:
      nir_scope = NIR_SCOPE_WORKGROUP;
      break;

   case SpvScopeSubgroup:
      nir_scope = NIR_SCOPE_SUBGROUP;
      break;

   case SpvScopeInvocation:
      nir_scope = NIR_SCOPE_INVOCATION;
      break;

   default:
      vtn_fail("Invalid memory scope");
   }

   return nir_scope;
}

static void
vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
                                SpvScope mem_scope,
                                SpvMemorySemanticsMask semantics)
{
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);

   /* Memory semantics is optional for OpControlBarrier. */
   nir_scope nir_mem_scope;
   if (nir_semantics == 0 || modes == 0)
      nir_mem_scope = NIR_SCOPE_NONE;
   else
      nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);

   nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
}

static void
vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
                               SpvMemorySemanticsMask semantics)
{
   nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
   nir_memory_semantics nir_semantics =
      vtn_mem_semantics_to_nir_mem_semantics(b, semantics);

   /* No barrier to add. */
   if (nir_semantics == 0 || modes == 0)
      return;

   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
}

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   /* Always use bare types for SSA values for a couple of reasons:
    *
    *  1. Code which emits deref chains should never listen to the explicit
    *     layout information on the SSA value if any exists.  If we've
    *     accidentally been relying on this, we want to find those bugs.
    *
    *  2. We want to be able to quickly check that an SSA value being assigned
    *     to a SPIR-V value has the right type.  Using bare types everywhere
    *     ensures that we can pointer-compare.
    */
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = glsl_get_bare_type(type);

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
      } else {
         vtn_assert(glsl_type_is_struct_or_ifc(type));
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_create_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
   src.src_type = type;
   return src;
}

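/* Worked example: if the operands word at w[mask_idx] is Bias | Lod and the
 * caller asks for Lod, one single-argument operand (Bias) precedes it, so the
 * Lod argument is found at w[mask_idx + 2] (the mask itself sits at
 * w[mask_idx] and Bias's argument at w[mask_idx + 1]).
 */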
static uint32_t
image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
                  uint32_t mask_idx, SpvImageOperandsMask op)
{
   static const SpvImageOperandsMask ops_with_arg =
      SpvImageOperandsBiasMask |
      SpvImageOperandsLodMask |
      SpvImageOperandsGradMask |
      SpvImageOperandsConstOffsetMask |
      SpvImageOperandsOffsetMask |
      SpvImageOperandsConstOffsetsMask |
      SpvImageOperandsSampleMask |
      SpvImageOperandsMinLodMask |
      SpvImageOperandsMakeTexelAvailableMask |
      SpvImageOperandsMakeTexelVisibleMask;

   assert(util_bitcount(op) == 1);
   assert(w[mask_idx] & op);
   assert(op & ops_with_arg);

   uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;

   /* Adjust indices for operands with two arguments. */
   static const SpvImageOperandsMask ops_with_two_args =
      SpvImageOperandsGradMask;
   idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);

   idx += mask_idx;

   vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
               "Image op claims to have %s but does not have enough "
               "following operands", spirv_imageoperands_to_string(op));

   return idx;
}

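/* Decoration-walker callback: it only looks for NonUniformEXT and ORs
 * ACCESS_NON_UNIFORM into the caller's access mask.
 */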
static void
non_uniform_decoration_cb(struct vtn_builder *b,
                          struct vtn_value *val, int member,
                          const struct vtn_decoration *dec, void *void_ctx)
{
   enum gl_access_qualifier *access = void_ctx;
   switch (dec->decoration) {
   case SpvDecorationNonUniformEXT:
      *access |= ACCESS_NON_UNIFORM;
      break;

   default:
      break;
   }
}

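/* Texture handling collects nir_tex_src entries into a local srcs[] array
 * (texture/sampler derefs, coordinate, comparator, then any optional image
 * operands) and only then creates a nir_tex_instr sized to exactly the
 * sources that were pushed.  For example, OpImageSampleImplicitLod with a
 * Bias operand becomes nir_texop_txb with an extra nir_tex_src_bias source.
 */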
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_type *ret_type = vtn_get_type(b, w[1]);

   if (opcode == SpvOpSampledImage) {
      struct vtn_sampled_image si = {
         .image = vtn_get_image(b, w[3]),
         .sampler = vtn_get_sampler(b, w[4]),
      };
      vtn_push_sampled_image(b, w[2], si);
      return;
   } else if (opcode == SpvOpImage) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
      vtn_push_image(b, w[2], si.image);
      return;
   }

   nir_deref_instr *image = NULL, *sampler = NULL;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
      image = si.image;
      sampler = si.sampler;
   } else {
      image = vtn_get_image(b, w[3]);
   }

   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
   const bool is_array = glsl_sampler_type_is_array(image->type);
   nir_alu_type dest_type = nir_type_invalid;

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
      } else {
         texop = nir_texop_txf;
      }
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      dest_type = nir_type_float;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      dest_type = nir_type_int;
      break;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;
      dest_type = nir_type_int;
      break;

   case SpvOpFragmentFetchAMD:
      texop = nir_texop_fragment_fetch;
      break;

   case SpvOpFragmentMaskFetchAMD:
      texop = nir_texop_fragment_mask_fetch;
      break;

   default:
      vtn_fail_with_opcode("Unhandled opcode", opcode);
   }

   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   p->src = nir_src_for_ssa(&image->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;
   p++;

   switch (texop) {
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_tg4:
   case nir_texop_lod:
      vtn_fail_if(sampler == NULL,
                  "%s requires an image of type OpTypeSampledImage",
                  spirv_op_to_string(opcode));
      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txs:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      /* These don't */
      break;
   case nir_texop_txf_ms_fb:
      vtn_fail("unexpected nir_texop_txf_ms_fb");
      break;
   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");
   case nir_texop_tex_prefetch:
      vtn_fail("unexpected nir_texop_tex_prefetch");
   }

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod:
   case SpvOpFragmentFetchAMD:
   case SpvOpFragmentMaskFetchAMD: {
      /* All these types have the coordinate as their first real argument */
      coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);

      if (is_array && texop != nir_texop_lod)
         coord_components++;

      coord = vtn_get_nir_ssa(b, w[idx++]);
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      coord = NULL;
      coord_components = 0;
      break;
   }

   switch (opcode) {
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;
      p++;
      break;

   default:
      break;
   }

   bool is_shadow = false;
   unsigned gather_component = 0;
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      is_shadow = true;
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
      break;

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component = vtn_constant_uint(b, w[idx++]);
      break;

   default:
      break;
   }

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* For OpFragmentFetchAMD, we always have a multisample index */
   if (opcode == SpvOpFragmentFetchAMD)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);

   /* Now we need to handle some number of optional arguments */
   struct vtn_value *gather_offsets = NULL;
   if (idx < count) {
      uint32_t operands = w[idx];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_tg4);
         if (texop == nir_texop_tex)
            texop = nir_texop_txb;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsBiasMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs || texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsGradMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
      }

      vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
                                            SpvImageOperandsOffsetMask |
                                            SpvImageOperandsConstOffsetMask)) > 1,
                  "At most one of the ConstOffset, Offset, and ConstOffsets "
                  "image operands can be used on a given instruction.");

      if (operands & SpvImageOperandsOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetMask) {
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
      }

      if (operands & SpvImageOperandsConstOffsetsMask) {
         vtn_assert(texop == nir_texop_tg4);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsConstOffsetsMask);
         gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
      }

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsSampleMask);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
      }

      if (operands & SpvImageOperandsMinLodMask) {
         vtn_assert(texop == nir_texop_tex ||
                    texop == nir_texop_txb ||
                    texop == nir_texop_txd);
         uint32_t arg = image_operand_arg(b, w, count, idx,
                                          SpvImageOperandsMinLodMask);
         (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
      }
   }

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
   instr->op = texop;

   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));

   instr->coord_components = coord_components;
   instr->sampler_dim = sampler_dim;
   instr->is_array = is_array;
   instr->is_shadow = is_shadow;
   instr->is_new_style_shadow =
      is_shadow && glsl_get_components(ret_type->type) == 1;
   instr->component = gather_component;

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."
    *
    * It's very careful to specify that the exact operand must be decorated
    * NonUniform.  The SPIR-V parser is not expected to chase through long
    * chains to find the NonUniform decoration.  It's either right there or we
    * can assume it doesn't exist.
    */
   enum gl_access_qualifier access = 0;
   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);

   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;

   /* for non-query ops, get dest_type from sampler type */
   if (dest_type == nir_type_invalid) {
      switch (glsl_get_sampler_result_type(image->type)) {
      case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
      case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
      case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
      case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
      default:
         vtn_fail("Invalid base type for sampler result");
      }
   }

   instr->dest_type = dest_type;

   nir_ssa_dest_init(&instr->instr, &instr->dest,
                     nir_tex_instr_dest_size(instr), 32, NULL);

   vtn_assert(glsl_get_vector_elements(ret_type->type) ==
              nir_tex_instr_dest_size(instr));

   if (gather_offsets) {
      vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
                  gather_offsets->type->length != 4,
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      struct vtn_type *vec_type = gather_offsets->type->array_element;
      vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
                  vec_type->length != 2 ||
                  !glsl_type_is_integer(vec_type->type),
                  "ConstOffsets must be an array of size four of vectors "
                  "of two integer components");

      unsigned bit_size = glsl_get_bit_size(vec_type->type);
      for (uint32_t i = 0; i < 4; i++) {
         const nir_const_value *cvec =
            gather_offsets->constant->elements[i]->values;
         for (uint32_t j = 0; j < 2; j++) {
            switch (bit_size) {
            case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;  break;
            case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
            case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
            case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
            default:
               vtn_fail("Unsupported bit size: %u", bit_size);
            }
         }
      }
   }

   nir_builder_instr_insert(&b->nb, &instr->instr);

   vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
}

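/* Note that the compare-exchange sources are reordered relative to the
 * SPIR-V word order: the comparator (w[8]) goes into src[0] and the new
 * value (w[7]) into src[1].  OpAtomicIIncrement/IDecrement have no data
 * operand in SPIR-V, so constant +1/-1 sources are synthesized instead.
 */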
static void
fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, nir_src *src)
{
   switch (opcode) {
   case SpvOpAtomicIIncrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
      break;

   case SpvOpAtomicIDecrement:
      src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
      break;

   case SpvOpAtomicISub:
      src[0] =
         nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
      break;

   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
      src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
      break;

   case SpvOpAtomicExchange:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
      break;

   default:
      vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
   }
}

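/* Pad the coordinate out to four components by repeating its last component,
 * e.g. a two-component (x, y) coordinate becomes (x, y, y, y).
 */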
static nir_ssa_def *
get_image_coord(struct vtn_builder *b, uint32_t value)
{
   nir_ssa_def *coord = vtn_get_nir_ssa(b, value);

   /* The image_load_store intrinsics assume a 4-dim coordinate */
   unsigned swizzle[4];
   for (unsigned i = 0; i < 4; i++)
      swizzle[i] = MIN2(i, coord->num_components - 1);

   return nir_swizzle(&b->nb, coord, swizzle, 4);
}

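/* Unlike get_image_coord above, this pads with component 0, so a
 * two-component value (x, y) becomes (x, y, x, x).
 */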
static nir_ssa_def *
expand_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   unsigned swiz[4];
   for (unsigned i = 0; i < 4; i++)
      swiz[i] = i < value->num_components ? i : 0;
   return nir_swizzle(b, value, swiz, 4);
}

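/* Image handling mirrors the texture path: decode the per-opcode operands
 * into a vtn_image_pointer, then select the matching
 * nir_intrinsic_image_deref_* intrinsic via the OP() table below.
 */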
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
{
   /* Just get this one out of the way */
   if (opcode == SpvOpImageTexelPointer) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_image_pointer);
      val->image = ralloc(b, struct vtn_image_pointer);

      val->image->image = vtn_nir_deref(b, w[3]);
      val->image->coord = get_image_coord(b, w[4]);
      val->image->sample = vtn_get_nir_ssa(b, w[5]);
      val->image->lod = nir_imm_int(&b->nb, 0);
      return;
   }

   struct vtn_image_pointer image;
   SpvScope scope = SpvScopeInvocation;
   SpvMemorySemanticsMask semantics = 0;

   enum gl_access_qualifier access = 0;

   struct vtn_value *res_val;
   switch (opcode) {
   case SpvOpAtomicExchange:
   case SpvOpAtomicCompareExchange:
   case SpvOpAtomicCompareExchangeWeak:
   case SpvOpAtomicIIncrement:
   case SpvOpAtomicIDecrement:
   case SpvOpAtomicIAdd:
   case SpvOpAtomicISub:
   case SpvOpAtomicLoad:
   case SpvOpAtomicSMin:
   case SpvOpAtomicUMin:
   case SpvOpAtomicSMax:
   case SpvOpAtomicUMax:
   case SpvOpAtomicAnd:
   case SpvOpAtomicOr:
   case SpvOpAtomicXor:
   case SpvOpAtomicFAddEXT:
      res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[4]);
      semantics = vtn_constant_uint(b, w[5]);
      access |= ACCESS_COHERENT;
      break;

   case SpvOpAtomicStore:
      res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
      image = *res_val->image;
      scope = vtn_constant_uint(b, w[2]);
      semantics = vtn_constant_uint(b, w[3]);
      access |= ACCESS_COHERENT;
      break;

   case SpvOpImageQuerySize:
      res_val = vtn_untyped_value(b, w[3]);
      image.image = vtn_get_image(b, w[3]);
      image.coord = NULL;
      image.sample = NULL;
      image.lod = NULL;
      break;

   case SpvOpImageRead: {
      res_val = vtn_untyped_value(b, w[3]);
      image.image = vtn_get_image(b, w[3]);
      image.coord = get_image_coord(b, w[4]);

      const SpvImageOperandsMask operands =
         count > 5 ? w[5] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelVisibleMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelVisible requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsMakeTexelVisibleMask);
         semantics = SpvMemorySemanticsMakeVisibleMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 5,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   case SpvOpImageWrite: {
      res_val = vtn_untyped_value(b, w[1]);
      image.image = vtn_get_image(b, w[1]);
      image.coord = get_image_coord(b, w[2]);

      /* texel = w[3] */

      const SpvImageOperandsMask operands =
         count > 4 ? w[4] : SpvImageOperandsMaskNone;

      if (operands & SpvImageOperandsSampleMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsSampleMask);
         image.sample = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.sample = nir_ssa_undef(&b->nb, 1, 32);
      }

      if (operands & SpvImageOperandsMakeTexelAvailableMask) {
         vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
                     "MakeTexelAvailable requires NonPrivateTexel to also be set.");
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsMakeTexelAvailableMask);
         semantics = SpvMemorySemanticsMakeAvailableMask;
         scope = vtn_constant_uint(b, w[arg]);
      }

      if (operands & SpvImageOperandsLodMask) {
         uint32_t arg = image_operand_arg(b, w, count, 4,
                                          SpvImageOperandsLodMask);
         image.lod = vtn_get_nir_ssa(b, w[arg]);
      } else {
         image.lod = nir_imm_int(&b->nb, 0);
      }

      /* TODO: Volatile. */

      break;
   }

   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_op op;
   switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
   OP(ImageQuerySize,            size)
   OP(ImageRead,                 load)
   OP(ImageWrite,                store)
   OP(AtomicLoad,                load)
   OP(AtomicStore,               store)
   OP(AtomicExchange,            atomic_exchange)
   OP(AtomicCompareExchange,     atomic_comp_swap)
   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
   OP(AtomicIIncrement,          atomic_add)
   OP(AtomicIDecrement,          atomic_add)
   OP(AtomicIAdd,                atomic_add)
   OP(AtomicISub,                atomic_add)
   OP(AtomicSMin,                atomic_imin)
   OP(AtomicUMin,                atomic_umin)
   OP(AtomicSMax,                atomic_imax)
   OP(AtomicUMax,                atomic_umax)
   OP(AtomicAnd,                 atomic_and)
   OP(AtomicOr,                  atomic_or)
   OP(AtomicXor,                 atomic_xor)
   OP(AtomicFAddEXT,             atomic_fadd)
#undef OP
   default:
      vtn_fail_with_opcode("Invalid image opcode", opcode);
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);

   intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);

   /* ImageQuerySize doesn't take any extra parameters */
   if (opcode != SpvOpImageQuerySize) {
      /* The image coordinate is always 4 components but we may not have that
       * many.  Swizzle to compensate.
       */
      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
      intrin->src[2] = nir_src_for_ssa(image.sample);
   }

   /* The Vulkan spec says:
    *
    *    "If an instruction loads from or stores to a resource (including
    *    atomics and image instructions) and the resource descriptor being
    *    accessed is not dynamically uniform, then the operand corresponding
    *    to that resource (e.g. the pointer or sampled image operand) must be
    *    decorated with NonUniform."