/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "spirv_to_nir_private.h"
#include "nir_vla.h"
#include "nir_control_flow.h"
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components);

         for (unsigned i = 0; i < num_components; i++)
            load->value.u[i] = constant->value.u[i];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows);

            for (unsigned j = 0; j < rows; j++)
               load->value.u[j] = constant->value.u[rows * i + j];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}
struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;
   default:
      unreachable("Invalid type for an SSA value");
   }
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count)
{
   return ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
}
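/* For reference: SPIR-V packs literal strings as UTF-8 bytes, four per
 * 32-bit word, with the terminating NUL included in the word count.  So,
 * as a worked example, the string "main" (4 bytes plus NUL) occupies two
 * words and reaches vtn_string_literal() with word_count == 2.
 */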
static const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      if (!handler(b, opcode, w, count))
         return w;

      w += count;
   }

   assert(w == end);
   return w;
}
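/* A minimal sketch (hypothetical, for illustration only) of a
 * vtn_instruction_handler, showing the contract vtn_foreach_instruction
 * expects: each instruction's first word packs its word count and opcode
 * (w[0] == (count << SpvWordCountShift) | opcode), and the handler returns
 * true to keep walking or false to stop at the current instruction.
 *
 *    static bool
 *    stop_at_function(struct vtn_builder *b, SpvOp opcode,
 *                     const uint32_t *w, unsigned count)
 *    {
 *       return opcode != SpvOpFunction;
 *    }
 *
 *    // Returns a pointer to the first OpFunction in [words, words_end):
 *    const uint32_t *func =
 *       vtn_foreach_instruction(b, words, words_end, stop_at_function);
 */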
static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled; assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   int new_member = member;

   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->member >= 0) {
         assert(member == -1);
         new_member = dec->member;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, new_member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, new_member, dec, data);
      }
   }
}
/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
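/* A hypothetical callback, just to illustrate the iteration contract: cb
 * fires once per decoration, with member == -1 for decorations applied to
 * the value itself and member >= 0 for ones that came in through
 * OpMemberDecorate (possibly via a decoration group).
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *value,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;
 *    }
 *
 *    unsigned num_decorations = 0;
 *    vtn_foreach_decoration(b, val, count_decorations_cb, &num_decorations);
 */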
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   int member = -1;
   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_undef);
      break;

   case SpvOpMemberDecorate:
      member = *(w++);
      /* fallthrough */
   case SpvOpDecorate: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      dec->member = member;
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
      member = *(w++);
      /* fallthrough */
   case SpvOpGroupDecorate: {
      struct vtn_value *group = &b->values[target];
      assert(group->value_type == vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = &b->values[*w];
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
         dec->member = member;
         dec->group = group;

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
struct member_decoration_ctx {
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};
/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   if (src->is_builtin)
      dest->builtin = src->builtin;

   if (!glsl_type_is_vector_or_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_ARRAY:
         dest->array_element = src->array_element;
         dest->stride = src->stride;
         break;

      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
         /* matrices */
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}
static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationSmooth:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_SMOOTH;
      break;
   case SpvDecorationNoperspective:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b,
                                                 ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      ctx->type->members[member]->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   default:
      unreachable("Unhandled member decoration");
   }
}
static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   default:
      unreachable("Unhandled type decoration");
   }
}
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt:
      val->type->type = glsl_int_type();
      break;
   case SpvOpTypeFloat:
      val->type->type = glsl_float_type();
      break;

   case SpvOpTypeVector: {
      const struct glsl_type *base =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base));
      val->type->type = glsl_vector_type(glsl_get_base_type(base), elems);
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base =
         vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = glsl_array_type(array_element->type, w[3]);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         /* TODO: Handle decorators */
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i].type = val->type->members[i]->type;
         fields[i].name = ralloc_asprintf(b, "field%d", i);
         fields[i].location = -1;
         fields[i].interpolation = 0;
         fields[i].centroid = 0;
         fields[i].sample = 0;
         fields[i].matrix_layout = 2;
         fields[i].stream = -1;
      }

      struct member_decoration_ctx ctx = {
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;

         /* FIXME: */
         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen.
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:     dim = GLSL_SAMPLER_DIM_1D;   break;
      case SpvDim2D:     dim = GLSL_SAMPLER_DIM_2D;   break;
      case SpvDim3D:     dim = GLSL_SAMPLER_DIM_3D;   break;
      case SpvDimCube:   dim = GLSL_SAMPLER_DIM_CUBE; break;
      case SpvDimRect:   dim = GLSL_SAMPLER_DIM_RECT; break;
      case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF;  break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];

      assert(w[6] == 0 && "FIXME: Handle multi-sampled textures");
      assert(w[7] == 1 && "FIXME: Add support for non-sampled images");

      val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                          glsl_get_base_type(sampled_type));
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
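/* For reference, a SPIR-V instruction like "%v4float = OpTypeVector %float 4"
 * arrives here with w[1] = %v4float (the id being defined), w[2] = %float,
 * and w[3] = 4, so the SpvOpTypeVector case above produces
 * glsl_vector_type(GLSL_TYPE_FLOAT, 4).  (Ids are shown symbolically; in the
 * binary they are plain uint32_t indices into b->values.)
 */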
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = ralloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->value.u[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->value.u[0] = NIR_FALSE;
      break;
   case SpvOpConstant:
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->value.u[0] = w[3];
      break;
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
         if (glsl_type_is_matrix(val->const_type)) {
            unsigned rows = glsl_get_vector_elements(val->const_type);
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               for (unsigned j = 0; j < rows; j++)
                  val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->value.u[i] = elems[i]->value.u[0];
         }
         break;

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
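/* For reference, a composite like "%v = OpConstantComposite %v2float %a %b"
 * arrives with count == 5: w[1] is the result type, w[2] the result id, and
 * w[3]/w[4] the constituent constant ids, hence elem_count == count - 3.
 */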
static void
vtn_get_builtin_location(SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInCullDistance:
      /* XXX figure this out */
      unreachable("unhandled builtin");
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInPrimitiveId:
      *location = VARYING_SLOT_PRIMITIVE_ID;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInTessLevelOuter:
   case SpvBuiltInTessLevelInner:
   case SpvBuiltInTessCoord:
   case SpvBuiltInPatchVertices:
      unreachable("no tessellation support");
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInFrontFacing:
      *location = VARYING_SLOT_FACE;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInSampleMask:
      *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInFragColor:
      *location = FRAG_RESULT_COLOR;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInHelperInvocation:
      unreachable("unsupported builtin"); /* XXX */
      break;
   case SpvBuiltInNumWorkgroups:
   case SpvBuiltInWorkgroupSize:
      /* these are constants, need to be handled specially */
      unreachable("unsupported builtin");
   case SpvBuiltInWorkgroupId:
   case SpvBuiltInLocalInvocationId:
   case SpvBuiltInGlobalInvocationId:
   case SpvBuiltInLocalInvocationIndex:
      unreachable("no compute shader support");
   default:
      unreachable("unsupported builtin");
   }
}
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   assert(val->value_type == vtn_value_type_deref);
   assert(val->deref->deref.child == NULL);
   assert(val->deref->var == void_var);

   nir_variable *var = void_var;
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationSmooth:
      var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
      break;
   case SpvDecorationNoperspective:
      var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var->data.interpolation = INTERP_QUALIFIER_FLAT;
      break;
   case SpvDecorationCentroid:
      var->data.centroid = true;
      break;
   case SpvDecorationSample:
      var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      assert(var->constant_initializer != NULL);
      var->data.read_only = true;
      break;
   case SpvDecorationNonwritable:
      var->data.read_only = true;
      break;
   case SpvDecorationLocation:
      var->data.location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var->data.explicit_index = true;
      var->data.index = dec->literals[0];
      break;
   case SpvDecorationBinding:
      var->data.explicit_binding = true;
      var->data.binding = dec->literals[0];
      break;
   case SpvDecorationDescriptorSet:
      var->data.descriptor_set = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      nir_variable_mode mode;
      vtn_get_builtin_location(dec->literals[0], &var->data.location,
                               &mode);
      var->data.explicit_location = true;
      var->data.mode = mode;
      if (mode == nir_var_shader_in || mode == nir_var_system_value)
         var->data.read_only = true;
      b->builtins[dec->literals[0]] = var;
      break;
   }
   case SpvDecorationNoStaticUse:
      /* This can safely be ignored */
      break;
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationGLSLShared:
   case SpvDecorationPatch:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonreadable:
   case SpvDecorationUniform:
      /* This is really nice but we have no use for it right now. */
   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationSpecId:
      break;
   default:
      unreachable("Unhandled variable decoration");
   }
}
static nir_variable *
get_builtin_variable(struct vtn_builder *b,
                     const struct glsl_type *type,
                     SpvBuiltIn builtin)
{
   nir_variable *var = b->builtins[builtin];

   if (!var) {
      var = ralloc(b->shader, nir_variable);
      var->type = type;

      nir_variable_mode mode;
      vtn_get_builtin_location(builtin, &var->data.location, &mode);
      var->data.explicit_location = true;
      var->data.mode = mode;
      var->name = ralloc_strdup(var, "builtin");

      switch (mode) {
      case nir_var_shader_in:
         exec_list_push_tail(&b->shader->inputs, &var->node);
         break;
      case nir_var_shader_out:
         exec_list_push_tail(&b->shader->outputs, &var->node);
         break;
      case nir_var_system_value:
         exec_list_push_tail(&b->shader->system_values, &var->node);
         break;
      default:
         unreachable("bad builtin mode");
      }

      b->builtins[builtin] = var;
   }

   return var;
}
static void
vtn_builtin_load(struct vtn_builder *b,
                 struct vtn_ssa_value *val,
                 SpvBuiltIn builtin)
{
   assert(glsl_type_is_vector_or_scalar(val->type));

   nir_variable *var = get_builtin_variable(b, val->type, builtin);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     glsl_get_vector_elements(val->type), NULL);

   load->variables[0] = nir_deref_var_create(load, var);
   load->num_components = glsl_get_vector_elements(val->type);
   nir_builder_instr_insert(&b->nb, &load->instr);
   val->def = &load->dest.ssa;
}
static void
vtn_builtin_store(struct vtn_builder *b,
                  struct vtn_ssa_value *val,
                  SpvBuiltIn builtin)
{
   assert(glsl_type_is_vector_or_scalar(val->type));

   nir_variable *var = get_builtin_variable(b, val->type, builtin);

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);

   store->variables[0] = nir_deref_var_create(store, var);
   store->num_components = glsl_get_vector_elements(val->type);
   store->src[0] = nir_src_for_ssa(val->def);
   nir_builder_instr_insert(&b->nb, &store->instr);
}
static struct vtn_ssa_value *
_vtn_variable_load(struct vtn_builder *b,
                   nir_deref_var *src_deref, struct vtn_type *src_type,
                   nir_deref *src_deref_tail)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = src_deref_tail->type;

   if (src_type->is_builtin) {
      vtn_builtin_load(b, val, src_type->builtin);
      return val;
   }

   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = src_deref_tail->child;
   src_deref_tail->child = NULL;

   if (glsl_type_is_vector_or_scalar(val->type)) {
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
      load->variables[0] =
         nir_deref_as_var(nir_copy_deref(load, &src_deref->deref));
      load->num_components = glsl_get_vector_elements(val->type);
      nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);

      nir_builder_instr_insert(&b->nb, &load->instr);

      if (src_deref->var->data.mode == nir_var_uniform &&
          glsl_get_base_type(val->type) == GLSL_TYPE_BOOL) {
         /* Uniform boolean loads need to be fixed up since they're defined
          * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
          */
         val->def = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
      } else {
         val->def = &load->dest.ssa;
      }
   } else if (glsl_get_base_type(val->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(val->type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);

      nir_deref_array *deref = nir_deref_array_create(b);
      deref->deref_array_type = nir_deref_array_type_direct;
      deref->deref.type = glsl_get_array_element(val->type);
      src_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->base_offset = i;
         val->elems[i] = _vtn_variable_load(b, src_deref,
                                            src_type->array_element,
                                            &deref->deref);
      }
   } else {
      assert(glsl_get_base_type(val->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);

      nir_deref_struct *deref = nir_deref_struct_create(b, 0);
      src_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->index = i;
         deref->deref.type = glsl_get_struct_field(val->type, i);
         val->elems[i] = _vtn_variable_load(b, src_deref,
                                            src_type->members[i],
                                            &deref->deref);
      }
   }

   src_deref_tail->child = old_child;

   return val;
}
static void
_vtn_variable_store(struct vtn_builder *b, struct vtn_type *dest_type,
                    nir_deref_var *dest_deref, nir_deref *dest_deref_tail,
                    struct vtn_ssa_value *src)
{
   if (dest_type->is_builtin) {
      vtn_builtin_store(b, src, dest_type->builtin);
      return;
   }

   nir_deref *old_child = dest_deref_tail->child;
   dest_deref_tail->child = NULL;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
      store->variables[0] =
         nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
      store->num_components = glsl_get_vector_elements(src->type);
      store->src[0] = nir_src_for_ssa(src->def);

      nir_builder_instr_insert(&b->nb, &store->instr);
   } else if (glsl_get_base_type(src->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(src->type)) {
      unsigned elems = glsl_get_length(src->type);

      nir_deref_array *deref = nir_deref_array_create(b);
      deref->deref_array_type = nir_deref_array_type_direct;
      deref->deref.type = glsl_get_array_element(src->type);
      dest_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->base_offset = i;
         _vtn_variable_store(b, dest_type->array_element, dest_deref,
                             &deref->deref, src->elems[i]);
      }
   } else {
      assert(glsl_get_base_type(src->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(src->type);

      nir_deref_struct *deref = nir_deref_struct_create(b, 0);
      dest_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->index = i;
         deref->deref.type = glsl_get_struct_field(src->type, i);
         _vtn_variable_store(b, dest_type->members[i], dest_deref,
                             &deref->deref, src->elems[i]);
      }
   }

   dest_deref_tail->child = old_child;
}
static struct vtn_ssa_value *
_vtn_block_load(struct vtn_builder *b, nir_intrinsic_op op,
                unsigned set, nir_ssa_def *binding,
                unsigned offset, nir_ssa_def *indirect,
                struct vtn_type *type)
{
   struct vtn_ssa_value *val = ralloc(b, struct vtn_ssa_value);
   val->type = type->type;
   val->transposed = NULL;
   if (glsl_type_is_vector_or_scalar(type->type)) {
      nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
      load->num_components = glsl_get_vector_elements(type->type);
      load->const_index[0] = set;
      load->src[0] = nir_src_for_ssa(binding);
      load->const_index[1] = offset;
      if (indirect)
         load->src[1] = nir_src_for_ssa(indirect);
      nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
      nir_builder_instr_insert(&b->nb, &load->instr);
      val->def = &load->dest.ssa;
   } else {
      unsigned elems = glsl_get_length(type->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_struct(type->type)) {
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = _vtn_block_load(b, op, set, binding,
                                            offset + type->offsets[i],
                                            indirect, type->members[i]);
         }
      } else {
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = _vtn_block_load(b, op, set, binding,
                                            offset + i * type->stride,
                                            indirect, type->array_element);
         }
      }
   }

   return val;
}
static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, nir_deref_var *src,
               struct vtn_type *type, nir_deref *src_tail)
{
   unsigned set = src->var->data.descriptor_set;

   nir_ssa_def *binding = nir_imm_int(&b->nb, src->var->data.binding);
   nir_deref *deref = &src->deref;

   /* The block variable may be an array, in which case the array index adds
    * an offset to the binding.  Figure out that index now.
    */
   if (deref->child->deref_type == nir_deref_type_array) {
      deref = deref->child;
      type = type->array_element;
      nir_deref_array *deref_array = nir_deref_as_array(deref);
      if (deref_array->deref_array_type == nir_deref_array_type_direct) {
         binding = nir_imm_int(&b->nb, src->var->data.binding +
                                       deref_array->base_offset);
      } else {
         binding = nir_iadd(&b->nb, binding, deref_array->indirect.ssa);
      }
   }

   unsigned offset = 0;
   nir_ssa_def *indirect = NULL;
   while (deref != src_tail) {
      deref = deref->child;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *deref_array = nir_deref_as_array(deref);
         if (deref_array->deref_array_type == nir_deref_array_type_direct) {
            offset += type->stride * deref_array->base_offset;
         } else {
            nir_ssa_def *offset = nir_imul(&b->nb, deref_array->indirect.ssa,
                                           nir_imm_int(&b->nb, type->stride));
            indirect = indirect ? nir_iadd(&b->nb, indirect, offset) : offset;
         }
         type = type->array_element;
         break;
      }

      case nir_deref_type_struct: {
         nir_deref_struct *deref_struct = nir_deref_as_struct(deref);
         offset += type->offsets[deref_struct->index];
         type = type->members[deref_struct->index];
         break;
      }

      default:
         unreachable("unknown deref type");
      }
   }

   nir_intrinsic_op op = indirect ? nir_intrinsic_load_ubo_indirect
                                  : nir_intrinsic_load_ubo;

   return _vtn_block_load(b, op, set, binding, offset, indirect, type);
}
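/* A worked example (hypothetical block layout): for a uniform block holding
 * "vec4 a; float b[8];" with std140-style offsets[] = {0, 16} and a float
 * array stride of 16, loading b[3] walks one struct deref
 * (offset += offsets[1] == 16) and one direct array deref
 * (offset += 3 * stride == 48), producing a constant offset of 64 with
 * indirect == NULL, so the non-indirect load_ubo opcode is chosen.  Only a
 * non-constant array index contributes to the indirect SSA value.
 */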
/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref *
get_deref_tail(nir_deref_var *deref)
{
   nir_deref *cur = &deref->deref;
   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
      cur = cur->child;

   return cur;
}
static nir_ssa_def *vtn_vector_extract(struct vtn_builder *b,
                                       nir_ssa_def *src, unsigned index);

static nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b,
                                               nir_ssa_def *src,
                                               nir_ssa_def *index);
static struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
                  struct vtn_type *src_type)
{
   nir_deref *src_tail = get_deref_tail(src);

   struct vtn_ssa_value *val;
   if (src->var->interface_type && src->var->data.mode == nir_var_uniform)
      val = vtn_block_load(b, src, src_type, src_tail);
   else
      val = _vtn_variable_load(b, src, src_type, src_tail);

   if (src_tail->child) {
      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
      assert(vec_deref->deref.child == NULL);
      val->type = vec_deref->deref.type;
      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def,
                                               vec_deref->indirect.ssa);
   }

   return val;
}
static nir_ssa_def *vtn_vector_insert(struct vtn_builder *b,
                                      nir_ssa_def *src, nir_ssa_def *insert,
                                      unsigned index);

static nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b,
                                              nir_ssa_def *src,
                                              nir_ssa_def *insert,
                                              nir_ssa_def *index);
static void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   nir_deref_var *dest, struct vtn_type *dest_type)
{
   nir_deref *dest_tail = get_deref_tail(dest);
   if (dest_tail->child) {
      struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_type,
                                                     dest_tail);
      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
      assert(deref->deref.child == NULL);
      if (deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      deref->base_offset);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              deref->indirect.ssa);
      _vtn_variable_store(b, dest_type, dest, dest_tail, val);
   } else {
      _vtn_variable_store(b, dest_type, dest, dest_tail, src);
   }
}
static void
vtn_variable_copy(struct vtn_builder *b, nir_deref_var *src,
                  nir_deref_var *dest, struct vtn_type *type)
{
   nir_deref *src_tail = get_deref_tail(src);

   if (src_tail->child || src->var->interface_type) {
      assert(get_deref_tail(dest)->child);
      struct vtn_ssa_value *val = vtn_variable_load(b, src, type);
      vtn_variable_store(b, val, dest, type);
   } else {
      nir_intrinsic_instr *copy =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
      copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
      copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));

      nir_builder_instr_insert(&b->nb, &copy->instr);
   }
}
static void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpVariable: {
      struct vtn_type *type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);

      nir_variable *var = ralloc(b->shader, nir_variable);

      var->type = type->type;
      var->name = ralloc_strdup(var, val->name);

      bool builtin_block = false;
      if (type->block) {
         var->interface_type = type->type;
         builtin_block = type->builtin_block;
      } else if (glsl_type_is_array(type->type) &&
                 (type->array_element->block ||
                  type->array_element->buffer_block)) {
         var->interface_type = type->array_element->type;
         builtin_block = type->array_element->builtin_block;
      } else {
         var->interface_type = NULL;
      }

      switch ((SpvStorageClass)w[3]) {
      case SpvStorageClassUniform:
      case SpvStorageClassUniformConstant:
         var->data.mode = nir_var_uniform;
         var->data.read_only = true;
         break;
      case SpvStorageClassInput:
         var->data.mode = nir_var_shader_in;
         var->data.read_only = true;
         break;
      case SpvStorageClassOutput:
         var->data.mode = nir_var_shader_out;
         break;
      case SpvStorageClassPrivateGlobal:
         var->data.mode = nir_var_global;
         break;
      case SpvStorageClassFunction:
         var->data.mode = nir_var_local;
         break;
      case SpvStorageClassWorkgroupLocal:
      case SpvStorageClassWorkgroupGlobal:
      case SpvStorageClassGeneric:
      case SpvStorageClassAtomicCounter:
      default:
         unreachable("Unhandled variable storage class");
      }

      if (count > 4) {
         assert(count == 5);
         var->constant_initializer =
            vtn_value(b, w[4], vtn_value_type_constant)->constant;
      }

      val->deref = nir_deref_var_create(b, var);
      val->deref_type = type;

      /* We handle decorations first because decorations might give us
       * location information.  We use the data.explicit_location field to
       * note that the location provided is the "final" location.  If
       * data.explicit_location == false, this means that it's relative to
       * whatever the base location is.
       */
      vtn_foreach_decoration(b, val, var_decoration_cb, var);

      if (!var->data.explicit_location) {
         if (b->execution_model == SpvExecutionModelFragment &&
             var->data.mode == nir_var_shader_out) {
            var->data.location += FRAG_RESULT_DATA0;
         } else if (b->execution_model == SpvExecutionModelVertex &&
                    var->data.mode == nir_var_shader_in) {
            var->data.location += VERT_ATTRIB_GENERIC0;
         } else if (var->data.mode == nir_var_shader_in ||
                    var->data.mode == nir_var_shader_out) {
            var->data.location += VARYING_SLOT_VAR0;
         }
      }

      /* If this was a uniform block, then we're not going to actually use the
       * variable (we're only going to use it to compute offsets), so don't
       * declare it in the shader.
       */
      if (var->data.mode == nir_var_uniform && var->interface_type)
         break;

      /* Builtin blocks are lowered to individual variables during SPIR-V ->
       * NIR, so don't declare them either.
       */
      if (builtin_block)
         break;

      switch (var->data.mode) {
      case nir_var_shader_in:
         exec_list_push_tail(&b->shader->inputs, &var->node);
         break;
      case nir_var_shader_out:
         exec_list_push_tail(&b->shader->outputs, &var->node);
         break;
      case nir_var_global:
         exec_list_push_tail(&b->shader->globals, &var->node);
         break;
      case nir_var_local:
         exec_list_push_tail(&b->impl->locals, &var->node);
         break;
      case nir_var_uniform:
         exec_list_push_tail(&b->shader->uniforms, &var->node);
         break;
      case nir_var_system_value:
         exec_list_push_tail(&b->shader->system_values, &var->node);
         break;
      }
      break;
   }

   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
      nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref;
      val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
      struct vtn_type *deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;

      nir_deref *tail = &val->deref->deref;
      while (tail->child)
         tail = tail->child;

      for (unsigned i = 0; i < count - 4; i++) {
         assert(w[i + 4] < b->value_id_bound);
         struct vtn_value *idx_val = &b->values[w[i + 4]];

         enum glsl_base_type base_type = glsl_get_base_type(tail->type);
         switch (base_type) {
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_ARRAY: {
            nir_deref_array *deref_arr = nir_deref_array_create(b);
            if (base_type == GLSL_TYPE_ARRAY ||
                glsl_type_is_matrix(tail->type)) {
               deref_type = deref_type->array_element;
            } else {
               assert(glsl_type_is_vector(tail->type));
               deref_type = ralloc(b, struct vtn_type);
               deref_type->type = glsl_scalar_type(base_type);
            }

            deref_arr->deref.type = deref_type->type;

            if (idx_val->value_type == vtn_value_type_constant) {
               unsigned idx = idx_val->constant->value.u[0];
               deref_arr->deref_array_type = nir_deref_array_type_direct;
               deref_arr->base_offset = idx;
            } else {
               assert(idx_val->value_type == vtn_value_type_ssa);
               deref_arr->deref_array_type = nir_deref_array_type_indirect;
               deref_arr->base_offset = 0;
               deref_arr->indirect =
                  nir_src_for_ssa(vtn_ssa_value(b, w[i + 4])->def);
            }
            tail->child = &deref_arr->deref;
            break;
         }

         case GLSL_TYPE_STRUCT: {
            assert(idx_val->value_type == vtn_value_type_constant);
            unsigned idx = idx_val->constant->value.u[0];
            deref_type = deref_type->members[idx];
            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
            deref_struct->deref.type = deref_type->type;
            tail->child = &deref_struct->deref;
            break;
         }
         default:
            unreachable("Invalid type for deref");
         }

         tail = tail->child;
      }

      /* For uniform blocks, we don't resolve the access chain until we
       * actually access the variable, so we need to keep around the original
       * type of the variable.
       */
      if (base->var->interface_type && base->var->data.mode == nir_var_uniform)
         val->deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
      else
         val->deref_type = deref_type;

      break;
   }

   case SpvOpCopyMemory: {
      nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
      nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
      struct vtn_type *type =
         vtn_value(b, w[1], vtn_value_type_deref)->deref_type;

      vtn_variable_copy(b, src, dest, type);
      break;
   }

   case SpvOpLoad: {
      nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
      struct vtn_type *src_type =
         vtn_value(b, w[3], vtn_value_type_deref)->deref_type;

      if (glsl_get_base_type(src_type->type) == GLSL_TYPE_SAMPLER) {
         vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
         return;
      }

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_variable_load(b, src, src_type);
      break;
   }

   case SpvOpStore: {
      nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
      struct vtn_type *dest_type =
         vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest, dest_type);
      break;
   }

   case SpvOpCopyMemorySized:
   case SpvOpArrayLength:
   case SpvOpImageTexelPointer:
   default:
      unreachable("Unhandled opcode");
   }
}
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   unreachable("Unhandled opcode");
}
static struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_value(b, index, vtn_value_type_ssa)->ssa->def);
   src.src_type = type;
   return src;
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   nir_deref_var *sampler = vtn_value(b, w[3], vtn_value_type_deref)->deref;

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   unsigned coord_components = 0;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
      coord_components = glsl_get_vector_elements(coord->type);
      p->src = nir_src_for_ssa(coord->def);
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }

   default:
      break;
   }

   /* These all have an explicit depth value as their next source */
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
      break;
   default:
      break;
   }

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageFetch:
      texop = nir_texop_txf;
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
   default:
      unreachable("Unhandled opcode");
   }

   /* Now we need to handle some number of optional arguments */
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txl;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask)
         assert(!"Constant offsets to texture gather not yet implemented");

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }

   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);

   const struct glsl_type *sampler_type = nir_deref_tail(&sampler->deref)->type;
   instr->sampler_dim = glsl_get_sampler_dim(sampler_type);

   switch (glsl_get_sampler_result_type(sampler_type)) {
   case GLSL_TYPE_FLOAT:   instr->dest_type = nir_type_float;     break;
   case GLSL_TYPE_INT:     instr->dest_type = nir_type_int;       break;
   case GLSL_TYPE_UINT:    instr->dest_type = nir_type_unsigned;  break;
   case GLSL_TYPE_BOOL:    instr->dest_type = nir_type_bool;      break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   instr->op = texop;
   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
   instr->coord_components = coord_components;
   instr->is_array = glsl_sampler_type_is_array(sampler_type);
   instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type);

   instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));

   nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
   val->ssa = vtn_create_ssa_value(b, glsl_vector_type(GLSL_TYPE_FLOAT, 4));
   val->ssa->def = &instr->dest.ssa;

   nir_builder_instr_insert(&b->nb, &instr->instr);
}
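/* For reference, an instruction such as
 * "%r = OpImageSampleExplicitLod %v4float %sampledImage %coord Lod %lod"
 * carries the optional-operand bitmask word after the coordinate, so the
 * code above sees SpvImageOperandsLodMask set, promotes texop from
 * nir_texop_tex to nir_texop_txl, and consumes %lod as nir_tex_src_lod.
 */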
static nir_alu_instr *
create_vec(void *mem_ctx, unsigned num_components)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(mem_ctx, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}
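/* Usage note: create_vec(b, n) only builds the nir_op_vec<n> instruction;
 * the caller is expected to fill in all n sources (and their swizzles)
 * before inserting it with nir_builder_instr_insert(), as vtn_transpose()
 * below does.
 */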
static struct vtn_ssa_value *
vtn_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}
/*
 * Normally, column vectors in SPIR-V correspond to a single NIR SSA
 * definition. But for matrix multiplies, we want to do one routine for
 * multiplying a matrix by a matrix and then pretend that vectors are matrices
 * with one column. So we "wrap" these things, and unwrap the result before we
 * send it off.
 */
static struct vtn_ssa_value *
vtn_wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
{
   if (val == NULL)
      return NULL;

   if (glsl_type_is_matrix(val->type))
      return val;

   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = val->type;
   dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
   dest->elems[0] = val;

   return dest;
}
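/* As an illustration, multiplying a mat4 by a vec4 can go through the same
 * column-wise code as a matrix-times-matrix: vtn_wrap_matrix() turns the
 * vec4 value v into a one-column "matrix" m with m->elems[0] == v, and
 * vtn_unwrap_matrix() undoes the wrapping on the result.
 */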
static struct vtn_ssa_value *
vtn_unwrap_matrix(struct vtn_ssa_value *val)
{
   if (glsl_type_is_matrix(val->type))
      return val;

   return val->elems[0];
}
static struct vtn_ssa_value *
vtn_matrix_multiply(struct vtn_builder *b,
                    struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
{
   struct vtn_ssa_value *src0 = vtn_wrap_matrix(b, _src0);
   struct vtn_ssa_value *src1 = vtn_wrap_matrix(b, _src1);
   struct vtn_ssa_value *src0_transpose = vtn_wrap_matrix(b, _src0->transposed);
   struct vtn_ssa_value *src1_transpose = vtn_wrap_matrix(b, _src1->transposed);

   unsigned src0_rows = glsl_get_vector_elements(src0->type);
   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
   unsigned src1_columns = glsl_get_matrix_columns(src1->type);

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_matrix_type(glsl_get_base_type(src0->type),
                                               src0_rows, src1_columns));

   dest = vtn_wrap_matrix(b, dest);

   bool transpose_result = false;
   if (src0_transpose && src1_transpose) {
      /* transpose(A) * transpose(B) = transpose(B * A) */
      src1 = src0_transpose;
      src0 = src1_transpose;
      src0_transpose = NULL;
      src1_transpose = NULL;
      transpose_result = true;
   }

   if (src0_transpose && !src1_transpose &&
       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
      /* We already have the rows of src0 and the columns of src1 available,
       * so we can just take the dot product of each row with each column to
       * get the result.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         nir_alu_instr *vec = create_vec(b, src0_rows);
         for (unsigned j = 0; j < src0_rows; j++) {
            vec->src[j].src =
               nir_src_for_ssa(nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                        src1->elems[i]->def));
         }

         nir_builder_instr_insert(&b->nb, &vec->instr);
         dest->elems[i]->def = &vec->dest.dest.ssa;
      }
   } else {
      /* We don't handle the case where src1 is transposed but not src0, since
       * the general case only uses individual components of src1 so the
       * optimizer should chew through the transpose we emitted for src1.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
         dest->elems[i]->def =
            nir_fmul(&b->nb, src0->elems[0]->def,
                     vtn_vector_extract(b, src1->elems[i]->def, 0));
         for (unsigned j = 1; j < src0_columns; j++) {
            dest->elems[i]->def =
               nir_fadd(&b->nb, dest->elems[i]->def,
                        nir_fmul(&b->nb, src0->elems[j]->def,
                                 vtn_vector_extract(b,
                                                    src1->elems[i]->def, j)));
         }
      }
   }

   dest = vtn_unwrap_matrix(dest);

   if (transpose_result)
      dest = vtn_transpose(b, dest);

   return dest;
}
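/* Sanity check of the general path on 2x2 inputs: with src0 columns (a, c)
 * and (b, d) and src1 first column (e, g),
 * dest[0] = (a, c) * e + (b, d) * g = (a*e + b*g, c*e + d*g), which is
 * exactly the first column of the matrix product.
 */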
static struct vtn_ssa_value *
vtn_mat_times_scalar(struct vtn_builder *b,
                     struct vtn_ssa_value *mat,
                     nir_ssa_def *scalar)
{
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
   for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
      if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
         dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
      else
         dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
   }

   return dest;
}
static void
vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   switch (opcode) {
   case SpvOpTranspose: {
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[3]);
      val->ssa = vtn_transpose(b, src);
      break;
   }

   case SpvOpOuterProduct: {
      struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);

      val->ssa = vtn_matrix_multiply(b, src0, vtn_transpose(b, src1));
      break;
   }

   case SpvOpMatrixTimesScalar: {
      struct vtn_ssa_value *mat = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *scalar = vtn_ssa_value(b, w[4]);

      if (mat->transposed) {
         val->ssa = vtn_transpose(b, vtn_mat_times_scalar(b, mat->transposed,
                                                          scalar->def));
      } else {
         val->ssa = vtn_mat_times_scalar(b, mat, scalar->def);
      }
      break;
   }

   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix: {
      struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);

      val->ssa = vtn_matrix_multiply(b, src0, src1);
      break;
   }

   default: unreachable("unknown matrix opcode");
   }
}
1904 vtn_handle_alu(struct vtn_builder
*b
, SpvOp opcode
,
1905 const uint32_t *w
, unsigned count
)
1907 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1908 const struct glsl_type
*type
=
1909 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
1910 val
->ssa
= vtn_create_ssa_value(b
, type
);
1912 /* Collect the various SSA sources */
1913 unsigned num_inputs
= count
- 3;
1914 nir_ssa_def
*src
[4];
1915 for (unsigned i
= 0; i
< num_inputs
; i
++)
1916 src
[i
] = vtn_ssa_value(b
, w
[i
+ 3])->def
;
1918 /* Indicates that the first two arguments should be swapped. This is
1919 * used for implementing greater-than and less-than-or-equal.
1925 /* Basic ALU operations */
1926 case SpvOpSNegate
: op
= nir_op_ineg
; break;
1927 case SpvOpFNegate
: op
= nir_op_fneg
; break;
1928 case SpvOpNot
: op
= nir_op_inot
; break;
1931 switch (src
[0]->num_components
) {
1932 case 1: op
= nir_op_imov
; break;
1933 case 2: op
= nir_op_bany2
; break;
1934 case 3: op
= nir_op_bany3
; break;
1935 case 4: op
= nir_op_bany4
; break;
1940 switch (src
[0]->num_components
) {
1941 case 1: op
= nir_op_imov
; break;
1942 case 2: op
= nir_op_ball2
; break;
1943 case 3: op
= nir_op_ball3
; break;
1944 case 4: op
= nir_op_ball4
; break;
1948 case SpvOpIAdd
: op
= nir_op_iadd
; break;
1949 case SpvOpFAdd
: op
= nir_op_fadd
; break;
1950 case SpvOpISub
: op
= nir_op_isub
; break;
1951 case SpvOpFSub
: op
= nir_op_fsub
; break;
1952 case SpvOpIMul
: op
= nir_op_imul
; break;
1953 case SpvOpFMul
: op
= nir_op_fmul
; break;
1954 case SpvOpUDiv
: op
= nir_op_udiv
; break;
1955 case SpvOpSDiv
: op
= nir_op_idiv
; break;
1956 case SpvOpFDiv
: op
= nir_op_fdiv
; break;
1957 case SpvOpUMod
: op
= nir_op_umod
; break;
1958 case SpvOpSMod
: op
= nir_op_umod
; break; /* FIXME? */
   case SpvOpFMod:                  op = nir_op_fmod;   break;

   case SpvOpDot:
      assert(src[0]->num_components == src[1]->num_components);
      switch (src[0]->num_components) {
      case 1:  op = nir_op_fmul;    break;
      case 2:  op = nir_op_fdot2;   break;
      case 3:  op = nir_op_fdot3;   break;
      case 4:  op = nir_op_fdot4;   break;
      }
      break;

   case SpvOpShiftRightLogical:     op = nir_op_ushr;   break;
   case SpvOpShiftRightArithmetic:  op = nir_op_ishr;   break;
   case SpvOpShiftLeftLogical:      op = nir_op_ishl;   break;
   case SpvOpLogicalOr:             op = nir_op_ior;    break;
   case SpvOpLogicalEqual:          op = nir_op_ieq;    break;
   case SpvOpLogicalNotEqual:       op = nir_op_ine;    break;
   case SpvOpLogicalAnd:            op = nir_op_iand;   break;
   case SpvOpBitwiseOr:             op = nir_op_ior;    break;
   case SpvOpBitwiseXor:            op = nir_op_ixor;   break;
   case SpvOpBitwiseAnd:            op = nir_op_iand;   break;
   case SpvOpSelect:                op = nir_op_bcsel;  break;
   case SpvOpIEqual:                op = nir_op_ieq;    break;

   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
   case SpvOpFOrdEqual:             op = nir_op_feq;    break;
   case SpvOpFUnordEqual:           op = nir_op_feq;    break;
   case SpvOpINotEqual:             op = nir_op_ine;    break;
   case SpvOpFOrdNotEqual:          op = nir_op_fne;    break;
   case SpvOpFUnordNotEqual:        op = nir_op_fne;    break;
   case SpvOpULessThan:             op = nir_op_ult;    break;
   case SpvOpSLessThan:             op = nir_op_ilt;    break;
   case SpvOpFOrdLessThan:          op = nir_op_flt;    break;
   case SpvOpFUnordLessThan:        op = nir_op_flt;    break;
   case SpvOpUGreaterThan:          op = nir_op_ult;    swap = true;   break;
   case SpvOpSGreaterThan:          op = nir_op_ilt;    swap = true;   break;
   case SpvOpFOrdGreaterThan:       op = nir_op_flt;    swap = true;   break;
   case SpvOpFUnordGreaterThan:     op = nir_op_flt;    swap = true;   break;
   case SpvOpULessThanEqual:        op = nir_op_uge;    swap = true;   break;
   case SpvOpSLessThanEqual:        op = nir_op_ige;    swap = true;   break;
   case SpvOpFOrdLessThanEqual:     op = nir_op_fge;    swap = true;   break;
   case SpvOpFUnordLessThanEqual:   op = nir_op_fge;    swap = true;   break;
   case SpvOpUGreaterThanEqual:     op = nir_op_uge;    break;
   case SpvOpSGreaterThanEqual:     op = nir_op_ige;    break;
   case SpvOpFOrdGreaterThanEqual:  op = nir_op_fge;    break;
   case SpvOpFUnordGreaterThanEqual: op = nir_op_fge;   break;

   /* Conversions: */
   case SpvOpConvertFToU:           op = nir_op_f2u;    break;
   case SpvOpConvertFToS:           op = nir_op_f2i;    break;
   case SpvOpConvertSToF:           op = nir_op_i2f;    break;
   case SpvOpConvertUToF:           op = nir_op_u2f;    break;
   case SpvOpBitcast:               op = nir_op_imov;   break;
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
      op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */
      break;

   /* Derivatives: */
   case SpvOpDPdx:                  op = nir_op_fddx;          break;
   case SpvOpDPdy:                  op = nir_op_fddy;          break;
   case SpvOpDPdxFine:              op = nir_op_fddx_fine;     break;
   case SpvOpDPdyFine:              op = nir_op_fddy_fine;     break;
   case SpvOpDPdxCoarse:            op = nir_op_fddx_coarse;   break;
   case SpvOpDPdyCoarse:            op = nir_op_fddy_coarse;   break;
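
   /* fwidth has no NIR opcode of its own.  Following the definition
    * fwidth(p) = abs(dFdx(p)) + abs(dFdy(p)), each variant below is
    * expanded into an fadd of the absolute values of the two screen-space
    * derivatives of the single source.
    */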
   case SpvOpFwidth:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      return;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      return;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      return;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      return;

   case SpvOpSRem:
   case SpvOpFRem:
      unreachable("No NIR equivalent");

   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   default:
      unreachable("Unhandled opcode");
   }

   if (swap) {
      /* Swap the first two sources */
      nir_ssa_def *tmp = src[0];
      src[0] = src[1];
      src[1] = tmp;
   }

   nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
                     glsl_get_vector_elements(type), val->name);
   instr->dest.write_mask = (1 << glsl_get_vector_elements(type)) - 1;
   val->ssa->def = &instr->dest.dest.ssa;

   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(src[i]);

   nir_builder_instr_insert(&b->nb, &instr->instr);
}
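
/* Extracts a single component from a vector by emitting a one-component
 * swizzle of src.  The resulting mov can typically be cleaned up by NIR's
 * later copy-propagation passes.
 */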
static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
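
/* NIR has no instruction for indexing a vector with a non-constant index,
 * so the two _dynamic helpers below build a ladder of bcsel instructions:
 * the constant-index result for component i is selected whenever the
 * runtime index compares equal to i.
 */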
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
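
/* Implements OpVectorShuffle.  A shuffle index of 0xffffffff marks a
 * component whose value is undefined in SPIR-V, so such components are
 * sourced from a fresh one-component ssa_undef instruction.
 */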
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components);

   nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
   nir_builder_instr_insert(&b->nb, &undef->instr);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src = nir_src_for_ssa(&undef->def);
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
/*
 * Concatenates a number of vectors/scalars together to produce a vector
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
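
/* Recursively copies a vtn_ssa_value tree.  Only the wrapper structs are
 * duplicated; the leaf nir_ssa_def pointers are shared with the source
 * rather than re-emitted.
 */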
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
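
/* Implements OpCompositeInsert, which returns a whole new composite with
 * one element replaced.  This is done by copying the source tree and then
 * patching the single element addressed by the index chain; if the last
 * index addresses a component inside a vector, vtn_vector_insert handles
 * that final step.
 */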
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index will be
       * the index at which to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down
          * to the component granularity.  The last index will then be the
          * index of the component to extract from the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
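
/* Phi handling is split into two passes.  The first pass (below) creates
 * an empty nir_phi_instr for each vector/scalar leaf of the value,
 * recursing through structs and arrays; the sources are only filled in by
 * the second pass, once every predecessor block has been emitted.
 */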
static void
vtn_phi_node_init(struct vtn_builder *b, struct vtn_ssa_value *val)
{
   if (glsl_type_is_vector_or_scalar(val->type)) {
      nir_phi_instr *phi = nir_phi_instr_create(b->shader);
      nir_ssa_dest_init(&phi->instr, &phi->dest,
                        glsl_get_vector_elements(val->type), NULL);
      exec_list_make_empty(&phi->srcs);
      nir_builder_instr_insert(&b->nb, &phi->instr);
      val->def = &phi->dest.ssa;
   } else {
      unsigned elems = glsl_get_length(val->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_init(b, val->elems[i]);
   }
}
static struct vtn_ssa_value *
vtn_phi_node_create(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, type);
   vtn_phi_node_init(b, val);
   return val;
}
static void
vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_phi_node_create(b, type);
}
static void
vtn_phi_node_add_src(struct vtn_ssa_value *phi, const nir_block *pred,
                     struct vtn_ssa_value *val)
{
   assert(phi->type == val->type);
   if (glsl_type_is_vector_or_scalar(phi->type)) {
      nir_phi_instr *phi_instr = nir_instr_as_phi(phi->def->parent_instr);
      nir_phi_src *src = ralloc(phi_instr, nir_phi_src);
      src->pred = (nir_block *) pred;
      src->src = nir_src_for_ssa(val->def);
      exec_list_push_tail(&phi_instr->srcs, &src->node);
   } else {
      unsigned elems = glsl_get_length(phi->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_add_src(phi->elems[i], pred, val->elems[i]);
   }
}
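
/* Finds the value of a phi source for the given NIR predecessor block.
 * The OpPhi (value, block) operand pairs beginning at w[3] are scanned
 * for an entry whose SPIR-V block maps to this NIR block; if none
 * matches (e.g. because the predecessor was synthesized while
 * structurizing the control flow), a new phi is recursively constructed
 * at the top of that block instead.
 */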
static struct vtn_ssa_value *
vtn_get_phi_node_src(struct vtn_builder *b, nir_block *block,
                     const struct glsl_type *type, const uint32_t *w,
                     unsigned count)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->block_table, block);
   if (entry) {
      struct vtn_block *spv_block = entry->data;
      for (unsigned off = 4; off < count; off += 2) {
         if (spv_block == vtn_value(b, w[off], vtn_value_type_block)->block) {
            return vtn_ssa_value(b, w[off - 1]);
         }
      }
   }

   b->nb.cursor = nir_before_block(block);
   struct vtn_ssa_value *phi = vtn_phi_node_create(b, type);

   struct set_entry *entry2;
   set_foreach(block->predecessors, entry2) {
      nir_block *pred = (nir_block *) entry2->key;
      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return phi;
}
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel) {
      b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
      return true;
   }

   if (opcode != SpvOpPhi)
      return true;

   struct vtn_ssa_value *phi = vtn_value(b, w[2], vtn_value_type_ssa)->ssa;

   struct set_entry *entry;
   set_foreach(b->block->block->predecessors, entry) {
      nir_block *pred = (nir_block *) entry->key;

      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, phi->type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return true;
}
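
/* Handles everything that can legally appear before the first function:
 * debug instructions, capabilities, extensions, types, constants, and
 * global variables.  Returns false at the first opcode it does not
 * recognize so that the caller knows where the preamble ends and the
 * function bodies begin.
 */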
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpCapability:
      /*
       * TODO properly handle these and give a real error if asking for too
       * much.
       */
      assert(w[1] == SpvCapabilityMatrix ||
             w[1] == SpvCapabilityShader);
      break;

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint:
      assert(b->entry_point == NULL);
      b->entry_point = &b->values[w[2]];
      b->execution_model = w[1];
      break;

   case SpvOpExecutionMode:
      break; /* Ignored for now */

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpLine:
      break; /* Ignored for now */

   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
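
/* A quick pre-pass over each function body that only records the CFG
 * structure: it creates the nir_function_overload for each OpFunction,
 * collects the blocks, and remembers each block's branch and merge
 * instructions.  No NIR code is emitted here; vtn_walk_blocks does that
 * later using this information.
 */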
static bool
vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
                                      const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      const struct glsl_type *func_type =
         vtn_value(b, w[4], vtn_value_type_type)->type->type;

      assert(glsl_get_function_return_type(func_type) == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      nir_function_overload *overload = nir_function_overload_create(func);
      overload->num_params = glsl_get_length(func_type);
      overload->params = ralloc_array(overload, nir_parameter,
                                      overload->num_params);
      for (unsigned i = 0; i < overload->num_params; i++) {
         const struct glsl_function_param *param =
            glsl_get_function_param(func_type, i);
         overload->params[i].type = param->type;
         if (param->in) {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_inout;
            } else {
               overload->params[i].param_type = nir_parameter_in;
            }
         } else {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_out;
            } else {
               assert(!"Parameter is neither in nor out");
            }
         }
      }

      b->func->overload = overload;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter:
      break; /* Does nothing */

   case SpvOpLabel:
      assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      assert(b->block);
      b->block->branch = w;
      b->block = NULL;
      break;

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      assert(b->block && b->block->merge_op == SpvOpNop);
      b->block->merge_op = opcode;
      b->block->merge_block_id = w[1];
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
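
/* Per-instruction dispatch for the second pass over a block's body.  By
 * this point the builder cursor sits inside the NIR CFG node being
 * filled, so each handler emits instructions through b->nb.  Returns
 * true so that vtn_foreach_instruction keeps iterating.
 */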
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel: {
      struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
      assert(block->block == NULL);

      block->block = nir_cursor_current_block(b->nb.cursor);
      break;
   }

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef:
      vtn_push_value(b, w[2], vtn_value_type_undef);
      break;

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
   case SpvOpImageTexelPointer:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalOr:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalAnd:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpSelect:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpFwidth:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_matrix_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpPhi:
      vtn_handle_phi_first_pass(b, w);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
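
/* Walks the SPIR-V blocks starting at `start` and emits structured NIR
 * control flow.  A block whose merge instruction was OpLoopMerge opens a
 * nir_loop; an OpBranchConditional under an OpSelectionMerge opens a
 * nir_if.  Branches to break_block or cont_block become NIR break and
 * continue jumps, and recursion stops when end_block (the current merge
 * block) is reached.
 */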
static void
vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
                struct vtn_block *break_block, struct vtn_block *cont_block,
                struct vtn_block *end_block)
{
   struct vtn_block *block = start;
   while (block != end_block) {
      if (block->merge_op == SpvOpLoopMerge) {
         /* This is the jump into a loop. */
         struct vtn_block *new_cont_block = block;
         struct vtn_block *new_break_block =
            vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

         nir_loop *loop = nir_loop_create(b->shader);
         nir_cf_node_insert(b->nb.cursor, &loop->cf_node);

         /* Reset the merge_op to prevent infinite recursion */
         block->merge_op = SpvOpNop;

         b->nb.cursor = nir_after_cf_list(&loop->body);
         vtn_walk_blocks(b, block, new_break_block, new_cont_block, NULL);

         b->nb.cursor = nir_after_cf_node(&loop->cf_node);
         block = new_break_block;
         continue;
      }

      const uint32_t *w = block->branch;
      SpvOp branch_op = w[0] & SpvOpCodeMask;

      b->block = block;
      vtn_foreach_instruction(b, block->label, block->branch,
                              vtn_handle_body_instruction);

      nir_block *cur_block = nir_cursor_current_block(b->nb.cursor);
      assert(cur_block == block->block);
      _mesa_hash_table_insert(b->block_table, cur_block, block);

      switch (branch_op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, w[1], vtn_value_type_block)->block;

         if (branch_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == end_block) {
            /* We're branching to the merge block of an if, since for loops
             * and functions end_block == NULL, so we're done here.
             */
            return;
         } else {
            /* We're branching to another block, and according to the rules,
             * we can only branch to another block with one predecessor (so
             * we're the only one jumping to it) so we can just process it
             * directly.
             */
            block = branch_block;
            continue;
         }
      }

      case SpvOpBranchConditional: {
         /* Gather up the branch blocks */
         struct vtn_block *then_block =
            vtn_value(b, w[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, w[3], vtn_value_type_block)->block;

         nir_if *if_stmt = nir_if_create(b->shader);
         if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
         nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);

         if (then_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else if (then_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else {
            /* According to the rules we're branching to two blocks that don't
             * have any other predecessors, so we can handle this as a
             * conventional if.
             */
            assert(block->merge_op == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

            b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
            vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);

            b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
            vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);

            b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);
            block = merge_block;
            continue;
         }

         /* If we got here then we inserted a predicated break or continue
          * above and we need to handle the other case.  We already set
          * `block` above to indicate what block to visit after the if.
          */

         /* It's possible that the other branch is also a break/continue.
          * If it is, we handle that here.
          */
         if (block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         }

         /* If we got here then there was a predicated break/continue but
          * the other half of the if has stuff in it.  `block` was already
          * set above so there is nothing left for us to do.
          */
         continue;
      }

      case SpvOpReturn: {
         nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                      nir_jump_return);
         nir_builder_instr_insert(&b->nb, &jump->instr);
         return;
      }

      case SpvOpKill: {
         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
         nir_builder_instr_insert(&b->nb, &discard->instr);
         return;
      }

      case SpvOpSwitch:
      case SpvOpReturnValue:
      case SpvOpUnreachable:
      default:
         unreachable("Unhandled opcode");
      }
   }
}
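
/* Top-level entry point.  Validates the SPIR-V header, runs the preamble
 * pass, then the CFG pre-pass, and finally emits NIR for each implemented
 * function by walking its blocks and resolving phi sources in a second
 * pass over the body.
 */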
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             gl_shader_stage stage,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] == 99);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   nir_shader *shader = nir_shader_create(NULL, stage, options);

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->shader = shader;
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   /* Do a very quick CFG analysis pass */
   vtn_foreach_instruction(b, words, word_end,
                           vtn_handle_first_cfg_pass_instruction);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = nir_function_impl_create(func->overload);
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      b->block_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      nir_builder_init(&b->nb, b->impl);
      b->nb.cursor = nir_after_cf_list(&b->impl->body);
      vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
      vtn_foreach_instruction(b, func->start_block->label, func->end,
                              vtn_handle_phi_second_pass);
   }

   /* Because we can still have output reads in NIR, we need to lower
    * outputs to temporaries before we are truly finished.
    */
   nir_lower_outputs_to_temporaries(shader);

   ralloc_free(b);

   return shader;
}