/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "spirv_to_nir_private.h"
#include "nir_vla.h"
#include "nir_control_flow.h"

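/* Materializes a nir_constant as a vtn_ssa_value by emitting load_const
 * instructions at the top of the current function.  Results are cached in
 * b->const_table, so each constant is only materialized once per function.
 */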
static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components);

         for (unsigned i = 0; i < num_components; i++)
            load->value.u[i] = constant->value.u[i];

         nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows);

            for (unsigned j = 0; j < rows; j++)
               load->value.u[j] = constant->value.u[rows * i + j];

            nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;
   default:
      unreachable("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count)
{
   return ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
}

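/* Walks a range of SPIR-V words, decoding each instruction header (the low
 * 16 bits of word 0 hold the opcode, the high 16 bits the word count) and
 * invoking the handler.  Iteration stops early if the handler returns false,
 * and the word pointer at which it stopped is returned.
 */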
static const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      if (!handler(b, opcode, w, count))
         return w;

      w += count;
   }

   return w;
}

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         assert(!"Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   int new_member = member;

   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->member >= 0) {
         assert(member == -1);
         new_member = dec->member;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, new_member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, new_member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}

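/* Records OpDecorate/OpMemberDecorate targets as vtn_decoration list entries
 * on the target value, and links decoration-group members back to their
 * group so vtn_foreach_decoration can expand them later.
 */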
static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   int member = -1;
   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_undef);
      break;

   case SpvOpMemberDecorate:
      member = *(w++);
      /* fallthrough */
   case SpvOpDecorate: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      dec->member = member;
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
      member = *(w++);
      /* fallthrough */
   case SpvOpGroupDecorate: {
      struct vtn_value *group = &b->values[target];
      assert(group->value_type == vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = &b->values[*w];
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
         dec->member = member;
         dec->group = group;

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

struct member_decoration_ctx {
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   dest->type = src->type;
   dest->is_builtin = src->is_builtin;
   if (src->is_builtin)
      dest->builtin = src->builtin;

   if (!glsl_type_is_vector_or_scalar(src->type)) {
      switch (glsl_get_base_type(src->type)) {
      case GLSL_TYPE_ARRAY:
         dest->array_element = src->array_element;
         dest->stride = src->stride;
         break;

      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_DOUBLE:
         /* matrices */
         dest->row_major = src->row_major;
         dest->stride = src->stride;
         break;

      case GLSL_TYPE_STRUCT: {
         unsigned elems = glsl_get_length(src->type);

         dest->members = ralloc_array(b, struct vtn_type *, elems);
         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));

         dest->offsets = ralloc_array(b, unsigned, elems);
         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
         break;
      }

      default:
         unreachable("unhandled type");
      }
   }

   return dest;
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationSmooth:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_SMOOTH;
      break;
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b,
                                                 ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      ctx->type->members[member]->stride = dec->literals[0];
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   default:
      unreachable("Unhandled member decoration");
   }
}

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;
   default:
      unreachable("Unhandled type decoration");
   }
}

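/* Translates an OpType* instruction into a vtn_type wrapping the matching
 * GLSL type.  Composite types pull in the vtn_types of their element ids,
 * and member/type decorations are applied once the type has been built.
 */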
static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->is_builtin = false;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt:
      val->type->type = glsl_int_type();
      break;
   case SpvOpTypeFloat:
      val->type->type = glsl_float_type();
      break;

   case SpvOpTypeVector: {
      const struct glsl_type *base =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base));
      val->type->type = glsl_vector_type(glsl_get_base_type(base), elems);
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base =
         vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = glsl_array_type(array_element->type, w[3]);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         /* TODO: Handle decorators */
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i].type = val->type->members[i]->type;
         fields[i].name = ralloc_asprintf(b, "field%d", i);
         fields[i].location = -1;
         fields[i].interpolation = 0;
         fields[i].centroid = 0;
         fields[i].sample = 0;
         fields[i].matrix_layout = 2;
         fields[i].stream = -1;
      }

      struct member_decoration_ctx ctx = {
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      const struct glsl_type *return_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;
      NIR_VLA(struct glsl_function_param, params, count - 3);
      for (unsigned i = 0; i < count - 3; i++) {
         params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;

         /* FIXME: */
         params[i].in = true;
         params[i].out = true;
      }
      val->type->type = glsl_function_type(return_type, params, count - 3);
      break;
   }

   case SpvOpTypePointer:
      /* FIXME:  For now, we'll just do the really lame thing and return
       * the same type.  The validator should ensure that the proper number
       * of dereferences happen
       */
      val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
      break;

   case SpvOpTypeImage: {
      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:     dim = GLSL_SAMPLER_DIM_1D;   break;
      case SpvDim2D:     dim = GLSL_SAMPLER_DIM_2D;   break;
      case SpvDim3D:     dim = GLSL_SAMPLER_DIM_3D;   break;
      case SpvDimCube:   dim = GLSL_SAMPLER_DIM_CUBE; break;
      case SpvDimRect:   dim = GLSL_SAMPLER_DIM_RECT; break;
      case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF;  break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];

      assert(w[6] == 0 && "FIXME: Handle multi-sampled textures");
      assert(w[7] == 1 && "FIXME: Add support for non-sampled images");

      val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                          glsl_get_base_type(sampled_type));
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

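/* Translates OpConstant* instructions into nir_constants.  Composite
 * vector and matrix constants are flattened into the value.u array, while
 * structs and arrays keep a list of their element constants.
 */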
static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = ralloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->value.u[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->value.u[0] = NIR_FALSE;
      break;
   case SpvOpConstant:
      assert(glsl_type_is_scalar(val->const_type));
      val->constant->value.u[0] = w[3];
      break;
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
         if (glsl_type_is_matrix(val->const_type)) {
            unsigned rows = glsl_get_vector_elements(val->const_type);
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               for (unsigned j = 0; j < rows; j++)
                  val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->value.u[i] = elems[i]->value.u[0];
         }
         break;

      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

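/* Maps a SPIR-V builtin to the corresponding NIR varying slot or system
 * value, along with the variable mode it lives in.  Several cases are still
 * unimplemented placeholders.
 */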
static void
vtn_get_builtin_location(SpvBuiltIn builtin, int *location,
                         nir_variable_mode *mode)
{
   switch (builtin) {
   case SpvBuiltInPosition:
      *location = VARYING_SLOT_POS;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInPointSize:
      *location = VARYING_SLOT_PSIZ;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInClipDistance:
      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInCullDistance:
      /* XXX figure this out */
      unreachable("unhandled builtin");
   case SpvBuiltInVertexId:
      /* Vulkan defines VertexID to be zero-based and reserves the new
       * builtin keyword VertexIndex to indicate the non-zero-based value.
       */
      *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInInstanceId:
      *location = SYSTEM_VALUE_INSTANCE_ID;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInPrimitiveId:
      *location = VARYING_SLOT_PRIMITIVE_ID;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInInvocationId:
      *location = SYSTEM_VALUE_INVOCATION_ID;
      *mode = nir_var_system_value;
      break;
   case SpvBuiltInLayer:
      *location = VARYING_SLOT_LAYER;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInTessLevelOuter:
   case SpvBuiltInTessLevelInner:
   case SpvBuiltInTessCoord:
   case SpvBuiltInPatchVertices:
      unreachable("no tessellation support");
   case SpvBuiltInFragCoord:
      *location = VARYING_SLOT_POS;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInPointCoord:
      *location = VARYING_SLOT_PNTC;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInFrontFacing:
      *location = VARYING_SLOT_FACE;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInSampleId:
      *location = SYSTEM_VALUE_SAMPLE_ID;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInSamplePosition:
      *location = SYSTEM_VALUE_SAMPLE_POS;
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInSampleMask:
      *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
      *mode = nir_var_shader_in;
      break;
   case SpvBuiltInFragColor:
      *location = FRAG_RESULT_COLOR;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInFragDepth:
      *location = FRAG_RESULT_DEPTH;
      *mode = nir_var_shader_out;
      break;
   case SpvBuiltInHelperInvocation:
      unreachable("unsupported builtin"); /* XXX */
      break;
   case SpvBuiltInNumWorkgroups:
   case SpvBuiltInWorkgroupSize:
      /* these are constants, need to be handled specially */
      unreachable("unsupported builtin");
   case SpvBuiltInWorkgroupId:
   case SpvBuiltInLocalInvocationId:
   case SpvBuiltInGlobalInvocationId:
   case SpvBuiltInLocalInvocationIndex:
      unreachable("no compute shader support");
   default:
      unreachable("unsupported builtin");
   }
}

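/* Applies a single decoration to a nir_variable: interpolation qualifiers,
 * location/binding/descriptor-set information, and builtins, which also get
 * registered in b->builtins for later lookup.
 */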
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
                  const struct vtn_decoration *dec, void *void_var)
{
   assert(val->value_type == vtn_value_type_deref);
   assert(val->deref->deref.child == NULL);
   assert(val->deref->var == void_var);

   nir_variable *var = void_var;
   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationSmooth:
      var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
      break;
   case SpvDecorationNoPerspective:
      var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      var->data.interpolation = INTERP_QUALIFIER_FLAT;
      break;
   case SpvDecorationCentroid:
      var->data.centroid = true;
      break;
   case SpvDecorationSample:
      var->data.sample = true;
      break;
   case SpvDecorationInvariant:
      var->data.invariant = true;
      break;
   case SpvDecorationConstant:
      assert(var->constant_initializer != NULL);
      var->data.read_only = true;
      break;
   case SpvDecorationNonWritable:
      var->data.read_only = true;
      break;
   case SpvDecorationLocation:
      var->data.location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      var->data.location_frac = dec->literals[0];
      break;
   case SpvDecorationIndex:
      var->data.explicit_index = true;
      var->data.index = dec->literals[0];
      break;
   case SpvDecorationBinding:
      var->data.explicit_binding = true;
      var->data.binding = dec->literals[0];
      break;
   case SpvDecorationDescriptorSet:
      var->data.descriptor_set = dec->literals[0];
      break;
   case SpvDecorationBuiltIn: {
      SpvBuiltIn builtin = dec->literals[0];

      nir_variable_mode mode;
      vtn_get_builtin_location(builtin, &var->data.location, &mode);
      var->data.explicit_location = true;
      var->data.mode = mode;
      if (mode == nir_var_shader_in || mode == nir_var_system_value)
         var->data.read_only = true;

      if (builtin == SpvBuiltInPosition || builtin == SpvBuiltInSamplePosition)
         var->data.origin_upper_left = b->origin_upper_left;

      b->builtins[dec->literals[0]] = var;
      break;
   }
   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationGLSLShared:
   case SpvDecorationPatch:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
      /* This is really nice but we have no use for it right now. */
   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationStream:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationSpecId:
      break;
   default:
      unreachable("Unhandled variable decoration");
   }
}

static nir_variable *
get_builtin_variable(struct vtn_builder *b,
                     const struct glsl_type *type,
                     SpvBuiltIn builtin)
{
   nir_variable *var = b->builtins[builtin];

   if (!var) {
      int location;
      nir_variable_mode mode;
      vtn_get_builtin_location(builtin, &location, &mode);

      var = nir_variable_create(b->shader, mode, type, "builtin");

      var->data.location = location;
      var->data.explicit_location = true;

      if (builtin == SpvBuiltInPosition || builtin == SpvBuiltInSamplePosition)
         var->data.origin_upper_left = b->origin_upper_left;

      b->builtins[builtin] = var;
   }

   return var;
}

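/* Recursively loads a variable into a vtn_ssa_value tree: vectors and
 * scalars become a single load_var intrinsic, while matrices, arrays, and
 * structs are loaded element by element through a temporary deref link
 * appended at the tail.
 */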
static struct vtn_ssa_value *
_vtn_variable_load(struct vtn_builder *b,
                   nir_deref_var *src_deref, struct vtn_type *src_type,
                   nir_deref *src_deref_tail)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = src_deref_tail->type;

   /* The deref tail may contain a deref to select a component of a vector (in
    * other words, it might not be an actual tail) so we have to save it away
    * here since we overwrite it later.
    */
   nir_deref *old_child = src_deref_tail->child;

   if (glsl_type_is_vector_or_scalar(val->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      src_deref_tail->child = NULL;

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
      load->variables[0] =
         nir_deref_as_var(nir_copy_deref(load, &src_deref->deref));
      load->num_components = glsl_get_vector_elements(val->type);
      nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);

      nir_builder_instr_insert(&b->nb, &load->instr);

      if (src_deref->var->data.mode == nir_var_uniform &&
          glsl_get_base_type(val->type) == GLSL_TYPE_BOOL) {
         /* Uniform boolean loads need to be fixed up since they're defined
          * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
          */
         val->def = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
      } else {
         val->def = &load->dest.ssa;
      }
   } else if (glsl_get_base_type(val->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(val->type)) {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);

      nir_deref_array *deref = nir_deref_array_create(b);
      deref->deref_array_type = nir_deref_array_type_direct;
      deref->deref.type = glsl_get_array_element(val->type);
      src_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->base_offset = i;
         val->elems[i] = _vtn_variable_load(b, src_deref,
                                            src_type->array_element,
                                            &deref->deref);
      }
   } else {
      assert(glsl_get_base_type(val->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);

      nir_deref_struct *deref = nir_deref_struct_create(b, 0);
      src_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->index = i;
         deref->deref.type = glsl_get_struct_field(val->type, i);
         val->elems[i] = _vtn_variable_load(b, src_deref,
                                            src_type->members[i],
                                            &deref->deref);
      }
   }

   src_deref_tail->child = old_child;

   return val;
}

static void
_vtn_variable_store(struct vtn_builder *b, struct vtn_type *dest_type,
                    nir_deref_var *dest_deref, nir_deref *dest_deref_tail,
                    struct vtn_ssa_value *src)
{
   nir_deref *old_child = dest_deref_tail->child;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      /* Terminate the deref chain in case there is one more link to pick
       * off a component of the vector.
       */
      dest_deref_tail->child = NULL;

      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
      store->variables[0] =
         nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
      store->num_components = glsl_get_vector_elements(src->type);
      store->src[0] = nir_src_for_ssa(src->def);

      nir_builder_instr_insert(&b->nb, &store->instr);
   } else if (glsl_get_base_type(src->type) == GLSL_TYPE_ARRAY ||
              glsl_type_is_matrix(src->type)) {
      unsigned elems = glsl_get_length(src->type);

      nir_deref_array *deref = nir_deref_array_create(b);
      deref->deref_array_type = nir_deref_array_type_direct;
      deref->deref.type = glsl_get_array_element(src->type);
      dest_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->base_offset = i;
         _vtn_variable_store(b, dest_type->array_element, dest_deref,
                             &deref->deref, src->elems[i]);
      }
   } else {
      assert(glsl_get_base_type(src->type) == GLSL_TYPE_STRUCT);
      unsigned elems = glsl_get_length(src->type);

      nir_deref_struct *deref = nir_deref_struct_create(b, 0);
      dest_deref_tail->child = &deref->deref;
      for (unsigned i = 0; i < elems; i++) {
         deref->index = i;
         deref->deref.type = glsl_get_struct_field(src->type, i);
         _vtn_variable_store(b, dest_type->members[i], dest_deref,
                             &deref->deref, src->elems[i]);
      }
   }

   dest_deref_tail->child = old_child;
}

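/* Emits UBO load intrinsics for a block variable.  Vector/scalar leaves
 * become a single load at the accumulated offset; composites recurse using
 * the member offsets or array stride recorded in the vtn_type.
 */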
static struct vtn_ssa_value *
_vtn_block_load(struct vtn_builder *b, nir_intrinsic_op op,
                unsigned set, unsigned binding, nir_ssa_def *index,
                unsigned offset, nir_ssa_def *indirect,
                struct vtn_type *type)
{
   struct vtn_ssa_value *val = ralloc(b, struct vtn_ssa_value);
   val->type = type->type;
   val->transposed = NULL;
   if (glsl_type_is_vector_or_scalar(type->type)) {
      nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
      load->num_components = glsl_get_vector_elements(type->type);
      load->const_index[0] = set;
      load->const_index[1] = binding;
      load->src[0] = nir_src_for_ssa(index);
      load->const_index[2] = offset;
      if (indirect)
         load->src[1] = nir_src_for_ssa(indirect);
      nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
      nir_builder_instr_insert(&b->nb, &load->instr);
      val->def = &load->dest.ssa;
   } else {
      unsigned elems = glsl_get_length(type->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_struct(type->type)) {
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = _vtn_block_load(b, op, set, binding, index,
                                            offset + type->offsets[i],
                                            indirect, type->members[i]);
         }
      } else {
         for (unsigned i = 0; i < elems; i++) {
            val->elems[i] = _vtn_block_load(b, op, set, binding, index,
                                            offset + i * type->stride,
                                            indirect, type->array_element);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_block_load(struct vtn_builder *b, nir_deref_var *src,
               struct vtn_type *type, nir_deref *src_tail)
{
   unsigned set = src->var->data.descriptor_set;
   unsigned binding = src->var->data.binding;

   nir_deref *deref = &src->deref;

   nir_ssa_def *index;
   if (deref->child->deref_type == nir_deref_type_array) {
      deref = deref->child;
      type = type->array_element;
      nir_deref_array *deref_array = nir_deref_as_array(deref);
      index = nir_imm_int(&b->nb, deref_array->base_offset);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect)
         index = nir_iadd(&b->nb, index, deref_array->indirect.ssa);
   } else {
      index = nir_imm_int(&b->nb, 0);
   }

   unsigned offset = 0;
   nir_ssa_def *indirect = NULL;
   while (deref != src_tail) {
      deref = deref->child;
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *deref_array = nir_deref_as_array(deref);
         if (deref_array->deref_array_type == nir_deref_array_type_direct) {
            offset += type->stride * deref_array->base_offset;
         } else {
            nir_ssa_def *offset = nir_imul(&b->nb, deref_array->indirect.ssa,
                                           nir_imm_int(&b->nb, type->stride));
            indirect = indirect ? nir_iadd(&b->nb, indirect, offset) : offset;
         }
         type = type->array_element;
         break;
      }

      case nir_deref_type_struct: {
         nir_deref_struct *deref_struct = nir_deref_as_struct(deref);
         offset += type->offsets[deref_struct->index];
         type = type->members[deref_struct->index];
         break;
      }

      default:
         unreachable("unknown deref type");
      }
   }

   nir_intrinsic_op op = indirect ? nir_intrinsic_load_ubo_vk_indirect
                                  : nir_intrinsic_load_ubo_vk;

   return _vtn_block_load(b, op, set, binding, index, offset, indirect, type);
}

/*
 * Gets the NIR-level deref tail, which may have as a child an array deref
 * selecting which component due to OpAccessChain supporting per-component
 * indexing in SPIR-V.
 */
static nir_deref *
get_deref_tail(nir_deref_var *deref)
{
   nir_deref *cur = &deref->deref;
   while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
      cur = cur->child;

   return cur;
}

static nir_ssa_def *vtn_vector_extract(struct vtn_builder *b,
                                       nir_ssa_def *src, unsigned index);

static nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b,
                                               nir_ssa_def *src,
                                               nir_ssa_def *index);

static struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
                  struct vtn_type *src_type)
{
   nir_deref *src_tail = get_deref_tail(src);

   struct vtn_ssa_value *val;
   if (src->var->interface_type && src->var->data.mode == nir_var_uniform)
      val = vtn_block_load(b, src, src_type, src_tail);
   else
      val = _vtn_variable_load(b, src, src_type, src_tail);

   if (src_tail->child) {
      nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
      assert(vec_deref->deref.child == NULL);
      val->type = vec_deref->deref.type;
      if (vec_deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
      else
         val->def = vtn_vector_extract_dynamic(b, val->def,
                                               vec_deref->indirect.ssa);
   }

   return val;
}

static nir_ssa_def *vtn_vector_insert(struct vtn_builder *b,
                                      nir_ssa_def *src, nir_ssa_def *insert,
                                      unsigned index);

static nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b,
                                              nir_ssa_def *src,
                                              nir_ssa_def *insert,
                                              nir_ssa_def *index);

static void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                   nir_deref_var *dest, struct vtn_type *dest_type)
{
   nir_deref *dest_tail = get_deref_tail(dest);
   if (dest_tail->child) {
      struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_type,
                                                     dest_tail);
      nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
      assert(deref->deref.child == NULL);
      if (deref->deref_array_type == nir_deref_array_type_direct)
         val->def = vtn_vector_insert(b, val->def, src->def,
                                      deref->base_offset);
      else
         val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
                                              deref->indirect.ssa);
      _vtn_variable_store(b, dest_type, dest, dest_tail, val);
   } else {
      _vtn_variable_store(b, dest_type, dest, dest_tail, src);
   }
}

static void
vtn_variable_copy(struct vtn_builder *b, nir_deref_var *src,
                  nir_deref_var *dest, struct vtn_type *type)
{
   nir_deref *src_tail = get_deref_tail(src);

   if (src_tail->child || src->var->interface_type) {
      assert(get_deref_tail(dest)->child);
      struct vtn_ssa_value *val = vtn_variable_load(b, src, type);
      vtn_variable_store(b, val, dest, type);
   } else {
      nir_intrinsic_instr *copy =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
      copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
      copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));

      nir_builder_instr_insert(&b->nb, &copy->instr);
   }
}

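/* Handles OpVariable, OpAccessChain, OpLoad, OpStore, and OpCopyMemory by
 * building nir_variables and NIR deref chains for them.
 */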
static void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpVariable: {
      struct vtn_type *type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);

      nir_variable *var = ralloc(b->shader, nir_variable);

      var->type = type->type;
      var->name = ralloc_strdup(var, val->name);

      bool builtin_block = false;
      if (type->block) {
         var->interface_type = type->type;
         builtin_block = type->builtin_block;
      } else if (glsl_type_is_array(type->type) &&
                 (type->array_element->block ||
                  type->array_element->buffer_block)) {
         var->interface_type = type->array_element->type;
         builtin_block = type->array_element->builtin_block;
      } else {
         var->interface_type = NULL;
      }

      switch ((SpvStorageClass)w[3]) {
      case SpvStorageClassUniform:
      case SpvStorageClassUniformConstant:
         var->data.mode = nir_var_uniform;
         var->data.read_only = true;
         break;
      case SpvStorageClassInput:
         var->data.mode = nir_var_shader_in;
         var->data.read_only = true;
         break;
      case SpvStorageClassOutput:
         var->data.mode = nir_var_shader_out;
         break;
      case SpvStorageClassPrivateGlobal:
         var->data.mode = nir_var_global;
         break;
      case SpvStorageClassFunction:
         var->data.mode = nir_var_local;
         break;
      case SpvStorageClassWorkgroupLocal:
      case SpvStorageClassWorkgroupGlobal:
      case SpvStorageClassGeneric:
      case SpvStorageClassAtomicCounter:
      default:
         unreachable("Unhandled variable storage class");
      }

      if (count > 4) {
         assert(count == 5);
         var->constant_initializer =
            vtn_value(b, w[4], vtn_value_type_constant)->constant;
      }

      val->deref = nir_deref_var_create(b, var);
      val->deref_type = type;

      /* We handle decorations first because decorations might give us
       * location information.  We use the data.explicit_location field to
       * note that the location provided is the "final" location.  If
       * data.explicit_location == false, this means that it's relative to
       * whatever the base location is.
       */
      vtn_foreach_decoration(b, val, var_decoration_cb, var);

      if (!var->data.explicit_location) {
         if (b->execution_model == SpvExecutionModelFragment &&
             var->data.mode == nir_var_shader_out) {
            var->data.location += FRAG_RESULT_DATA0;
         } else if (b->execution_model == SpvExecutionModelVertex &&
                    var->data.mode == nir_var_shader_in) {
            var->data.location += VERT_ATTRIB_GENERIC0;
         } else if (var->data.mode == nir_var_shader_in ||
                    var->data.mode == nir_var_shader_out) {
            var->data.location += VARYING_SLOT_VAR0;
         }
      }

      /* If this was a uniform block, then we're not going to actually use the
       * variable (we're only going to use it to compute offsets), so don't
       * declare it in the shader.
       */
      if (var->data.mode == nir_var_uniform && var->interface_type)
         break;

      /* Builtin blocks are lowered to individual variables during SPIR-V ->
       * NIR, so don't declare them either.
       */
      if (builtin_block)
         break;

      if (var->data.mode == nir_var_local) {
         nir_function_impl_add_variable(b->impl, var);
      } else {
         nir_shader_add_variable(b->shader, var);
      }

      break;
   }

   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
      nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref;
      val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
      struct vtn_type *deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;

      nir_deref *tail = &val->deref->deref;
      while (tail->child)
         tail = tail->child;

      for (unsigned i = 0; i < count - 4; i++) {
         assert(w[i + 4] < b->value_id_bound);
         struct vtn_value *idx_val = &b->values[w[i + 4]];

         enum glsl_base_type base_type = glsl_get_base_type(tail->type);
         switch (base_type) {
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_ARRAY: {
            nir_deref_array *deref_arr = nir_deref_array_create(b);
            if (base_type == GLSL_TYPE_ARRAY ||
                glsl_type_is_matrix(tail->type)) {
               deref_type = deref_type->array_element;
            } else {
               assert(glsl_type_is_vector(tail->type));
               deref_type = ralloc(b, struct vtn_type);
               deref_type->type = glsl_scalar_type(base_type);
            }

            deref_arr->deref.type = deref_type->type;

            if (idx_val->value_type == vtn_value_type_constant) {
               unsigned idx = idx_val->constant->value.u[0];
               deref_arr->deref_array_type = nir_deref_array_type_direct;
               deref_arr->base_offset = idx;
            } else {
               assert(idx_val->value_type == vtn_value_type_ssa);
               assert(glsl_type_is_scalar(idx_val->ssa->type));
               deref_arr->deref_array_type = nir_deref_array_type_indirect;
               deref_arr->base_offset = 0;
               deref_arr->indirect = nir_src_for_ssa(idx_val->ssa->def);
            }
            tail->child = &deref_arr->deref;
            break;
         }

         case GLSL_TYPE_STRUCT: {
            assert(idx_val->value_type == vtn_value_type_constant);
            unsigned idx = idx_val->constant->value.u[0];
            deref_type = deref_type->members[idx];
            nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
            deref_struct->deref.type = deref_type->type;
            tail->child = &deref_struct->deref;
            break;
         }
         default:
            unreachable("Invalid type for deref");
         }

         if (deref_type->is_builtin) {
            /* If we encounter a builtin, we throw away the rest of the
             * access chain, jump to the builtin, and keep building.
             */
            nir_variable *builtin = get_builtin_variable(b, deref_type->type,
                                                         deref_type->builtin);
            val->deref = nir_deref_var_create(b, builtin);
            tail = &val->deref->deref;
         } else {
            tail = tail->child;
         }
      }

      /* For uniform blocks, we don't resolve the access chain until we
       * actually access the variable, so we need to keep around the original
       * type of the variable.
       */
      if (base->var->interface_type && base->var->data.mode == nir_var_uniform)
         val->deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
      else
         val->deref_type = deref_type;

      break;
   }

   case SpvOpCopyMemory: {
      nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
      nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
      struct vtn_type *type =
         vtn_value(b, w[1], vtn_value_type_deref)->deref_type;

      vtn_variable_copy(b, src, dest, type);
      break;
   }

   case SpvOpLoad: {
      nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
      struct vtn_type *src_type =
         vtn_value(b, w[3], vtn_value_type_deref)->deref_type;

      if (glsl_get_base_type(src_type->type) == GLSL_TYPE_SAMPLER) {
         vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
         return;
      }

      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = vtn_variable_load(b, src, src_type);
      break;
   }

   case SpvOpStore: {
      nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
      struct vtn_type *dest_type =
         vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest, dest_type);
      break;
   }

   case SpvOpCopyMemorySized:
   case SpvOpArrayLength:
   case SpvOpImageTexelPointer:
   default:
      unreachable("Unhandled opcode");
   }
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   unreachable("Unhandled opcode");
}

static struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
         }

         val->elems[i] = vtn_create_ssa_value(b, child_type);
      }
   }

   return val;
}

static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;
   return src;
}

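/* Translates SPIR-V image sample/fetch/gather/query opcodes into a
 * nir_tex_instr.  Sources are collected into a local array first so the
 * instruction can be created with the exact source count.
 */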
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   nir_deref_var *sampler = vtn_value(b, w[3], vtn_value_type_deref)->deref;

   nir_tex_src srcs[8]; /* 8 should be enough */
   nir_tex_src *p = srcs;

   unsigned idx = 4;

   unsigned coord_components = 0;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
      coord_components = glsl_get_vector_elements(coord->type);
      p->src = nir_src_for_ssa(coord->def);
      p->src_type = nir_tex_src_coord;
      p++;
      break;
   }
   default:
      break;
   }

   /* These all have an explicit depth value as their next source */
   switch (opcode) {
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
      break;
   default:
      break;
   }

   /* Figure out the base texture operation */
   nir_texop texop;
   switch (opcode) {
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;
      break;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;
      break;

   case SpvOpImageFetch:
      texop = nir_texop_txf;
      break;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;
      break;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;
      break;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;
      break;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;
      break;

   case SpvOpImageQuerySamples:
   default:
      unreachable("Unhandled opcode");
   }

   /* Now we need to handle some number of optional arguments */
   if (idx < count) {
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
      }

      if (operands & SpvImageOperandsLodMask) {
         assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
      }

      if (operands & SpvImageOperandsGradMask) {
         assert(texop == nir_texop_tex);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
      }

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask)
         assert(!"Constant offsets to texture gather not yet implemented");

      if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
      }
   }

   /* We should have now consumed exactly all of the arguments */
   assert(idx == count);

   nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);

   const struct glsl_type *sampler_type = nir_deref_tail(&sampler->deref)->type;
   instr->sampler_dim = glsl_get_sampler_dim(sampler_type);

   switch (glsl_get_sampler_result_type(sampler_type)) {
   case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float;    break;
   case GLSL_TYPE_INT:   instr->dest_type = nir_type_int;      break;
   case GLSL_TYPE_UINT:  instr->dest_type = nir_type_unsigned; break;
   case GLSL_TYPE_BOOL:  instr->dest_type = nir_type_bool;     break;
   default:
      unreachable("Invalid base type for sampler result");
   }

   instr->op = texop;
   memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
   instr->coord_components = coord_components;
   instr->is_array = glsl_sampler_type_is_array(sampler_type);
   instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type);

   instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));

   nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
   val->ssa = vtn_create_ssa_value(b, glsl_vector_type(GLSL_TYPE_FLOAT, 4));
   val->ssa->def = &instr->dest.ssa;

   nir_builder_instr_insert(&b->nb, &instr->instr);
}

static nir_alu_instr *
create_vec(void *mem_ctx, unsigned num_components)
{
   nir_op op;
   switch (num_components) {
   case 1: op = nir_op_fmov; break;
   case 2: op = nir_op_vec2; break;
   case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
   default: unreachable("bad vector size");
   }

   nir_alu_instr *vec = nir_alu_instr_create(mem_ctx, op);
   nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
   vec->dest.write_mask = (1 << num_components) - 1;

   return vec;
}

static struct vtn_ssa_value *
vtn_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   if (src->transposed)
      return src->transposed;

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_transposed_type(src->type));

   for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
      nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type));
      if (glsl_type_is_vector_or_scalar(src->type)) {
         vec->src[0].src = nir_src_for_ssa(src->def);
         vec->src[0].swizzle[0] = i;
      } else {
         for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
            vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
            vec->src[j].swizzle[0] = i;
         }
      }
      nir_builder_instr_insert(&b->nb, &vec->instr);
      dest->elems[i]->def = &vec->dest.dest.ssa;
   }

   dest->transposed = src;

   return dest;
}

/*
 * Normally, column vectors in SPIR-V correspond to a single NIR SSA
 * definition. But for matrix multiplies, we want to do one routine for
 * multiplying a matrix by a matrix and then pretend that vectors are matrices
 * with one column. So we "wrap" these things, and unwrap the result before we
 * send it off.
 */
static struct vtn_ssa_value *
vtn_wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
{
   if (val == NULL)
      return NULL;

   if (glsl_type_is_matrix(val->type))
      return val;

   struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
   dest->type = val->type;
   dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
   dest->elems[0] = val;

   return dest;
}

static struct vtn_ssa_value *
vtn_unwrap_matrix(struct vtn_ssa_value *val)
{
   if (glsl_type_is_matrix(val->type))
      return val;

   return val->elems[0];
}

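/* General matrix multiply.  Operands are wrapped so vectors act as
 * one-column matrices, and known transposes are exploited: when both
 * operands are transposes, the product is computed as transpose(B * A);
 * when only the left operand is a transposed float matrix, its rows are
 * already available for per-element dot products with src1's columns.
 */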
static struct vtn_ssa_value *
vtn_matrix_multiply(struct vtn_builder *b,
                    struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
{
   struct vtn_ssa_value *src0 = vtn_wrap_matrix(b, _src0);
   struct vtn_ssa_value *src1 = vtn_wrap_matrix(b, _src1);
   struct vtn_ssa_value *src0_transpose = vtn_wrap_matrix(b, _src0->transposed);
   struct vtn_ssa_value *src1_transpose = vtn_wrap_matrix(b, _src1->transposed);

   unsigned src0_rows = glsl_get_vector_elements(src0->type);
   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
   unsigned src1_columns = glsl_get_matrix_columns(src1->type);

   struct vtn_ssa_value *dest =
      vtn_create_ssa_value(b, glsl_matrix_type(glsl_get_base_type(src0->type),
                                               src0_rows, src1_columns));

   dest = vtn_wrap_matrix(b, dest);

   bool transpose_result = false;
   if (src0_transpose && src1_transpose) {
      /* transpose(A) * transpose(B) = transpose(B * A) */
      src1 = src0_transpose;
      src0 = src1_transpose;
      src0_transpose = NULL;
      src1_transpose = NULL;
      transpose_result = true;
   }

   if (src0_transpose && !src1_transpose &&
       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
      /* We already have the rows of src0 and the columns of src1 available,
       * so we can just take the dot product of each row with each column to
       * get the result.
       */
      for (unsigned i = 0; i < src1_columns; i++) {
         nir_alu_instr *vec = create_vec(b, src0_rows);
         for (unsigned j = 0; j < src0_rows; j++) {
            vec->src[j].src =
               nir_src_for_ssa(nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                        src1->elems[i]->def));
         }

         nir_builder_instr_insert(&b->nb, &vec->instr);
         dest->elems[i]->def = &vec->dest.dest.ssa;
      }
   } else {
      /* We don't handle the case where src1 is transposed but not src0, since
       * the general case only uses individual components of src1 so the
       * optimizer should chew through the transpose we emitted for src1.
       */
      for (unsigned i = 0; i < src1_columns; i++) {
         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
         dest->elems[i]->def =
            nir_fmul(&b->nb, src0->elems[0]->def,
                     vtn_vector_extract(b, src1->elems[i]->def, 0));
         for (unsigned j = 1; j < src0_columns; j++) {
            dest->elems[i]->def =
               nir_fadd(&b->nb, dest->elems[i]->def,
                        nir_fmul(&b->nb, src0->elems[j]->def,
                                 vtn_vector_extract(b,
                                                    src1->elems[i]->def, j)));
         }
      }
   }

   dest = vtn_unwrap_matrix(dest);

   if (transpose_result)
      dest = vtn_transpose(b, dest);

   return dest;
}

static struct vtn_ssa_value *
vtn_mat_times_scalar(struct vtn_builder *b,
                     struct vtn_ssa_value *mat,
                     nir_ssa_def *scalar)
{
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
   for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
      if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
         dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
      else
         dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
   }

   return dest;
}

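/* Matrix ALU opcodes: transpose, outer product, and the various
 * matrix/vector/scalar multiplies, all built on vtn_matrix_multiply.
 */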
static void
vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   switch (opcode) {
   case SpvOpTranspose: {
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[3]);
      val->ssa = vtn_transpose(b, src);
      break;
   }

   case SpvOpOuterProduct: {
      struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);

      val->ssa = vtn_matrix_multiply(b, src0, vtn_transpose(b, src1));
      break;
   }

   case SpvOpMatrixTimesScalar: {
      struct vtn_ssa_value *mat = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *scalar = vtn_ssa_value(b, w[4]);

      if (mat->transposed) {
         val->ssa = vtn_transpose(b, vtn_mat_times_scalar(b, mat->transposed,
                                                          scalar->def));
      } else {
         val->ssa = vtn_mat_times_scalar(b, mat, scalar->def);
      }
      break;
   }

   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix: {
      struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
      struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);

      val->ssa = vtn_matrix_multiply(b, src0, src1);
      break;
   }

   default: unreachable("unknown matrix opcode");
   }
}

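/* Scalar/vector ALU opcodes.  Most map directly onto a nir_op; comparisons
 * that NIR only has in one direction are handled by swapping the first two
 * sources.  Derivative-width and vector-times-scalar opcodes are emitted
 * directly through the builder instead.
 */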
1855 vtn_handle_alu(struct vtn_builder
*b
, SpvOp opcode
,
1856 const uint32_t *w
, unsigned count
)
1858 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1859 const struct glsl_type
*type
=
1860 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
1861 val
->ssa
= vtn_create_ssa_value(b
, type
);
1863 /* Collect the various SSA sources */
1864 unsigned num_inputs
= count
- 3;
1865 nir_ssa_def
*src
[4];
1866 for (unsigned i
= 0; i
< num_inputs
; i
++)
1867 src
[i
] = vtn_ssa_value(b
, w
[i
+ 3])->def
;
1869 /* Indicates that the first two arguments should be swapped. This is
1870 * used for implementing greater-than and less-than-or-equal.
1876 /* Basic ALU operations */
1877 case SpvOpSNegate
: op
= nir_op_ineg
; break;
1878 case SpvOpFNegate
: op
= nir_op_fneg
; break;
1879 case SpvOpNot
: op
= nir_op_inot
; break;
1882 switch (src
[0]->num_components
) {
1883 case 1: op
= nir_op_imov
; break;
1884 case 2: op
= nir_op_bany2
; break;
1885 case 3: op
= nir_op_bany3
; break;
1886 case 4: op
= nir_op_bany4
; break;
1891 switch (src
[0]->num_components
) {
1892 case 1: op
= nir_op_imov
; break;
1893 case 2: op
= nir_op_ball2
; break;
1894 case 3: op
= nir_op_ball3
; break;
1895 case 4: op
= nir_op_ball4
; break;
1899 case SpvOpIAdd
: op
= nir_op_iadd
; break;
1900 case SpvOpFAdd
: op
= nir_op_fadd
; break;
1901 case SpvOpISub
: op
= nir_op_isub
; break;
1902 case SpvOpFSub
: op
= nir_op_fsub
; break;
1903 case SpvOpIMul
: op
= nir_op_imul
; break;
1904 case SpvOpFMul
: op
= nir_op_fmul
; break;
1905 case SpvOpUDiv
: op
= nir_op_udiv
; break;
1906 case SpvOpSDiv
: op
= nir_op_idiv
; break;
1907 case SpvOpFDiv
: op
= nir_op_fdiv
; break;
1908 case SpvOpUMod
: op
= nir_op_umod
; break;
1909 case SpvOpSMod
: op
= nir_op_umod
; break; /* FIXME? */
1910 case SpvOpFMod
: op
= nir_op_fmod
; break;
1913 assert(src
[0]->num_components
== src
[1]->num_components
);
1914 switch (src
[0]->num_components
) {
1915 case 1: op
= nir_op_fmul
; break;
1916 case 2: op
= nir_op_fdot2
; break;
1917 case 3: op
= nir_op_fdot3
; break;
1918 case 4: op
= nir_op_fdot4
; break;
1922 case SpvOpShiftRightLogical
: op
= nir_op_ushr
; break;
1923 case SpvOpShiftRightArithmetic
: op
= nir_op_ishr
; break;
1924 case SpvOpShiftLeftLogical
: op
= nir_op_ishl
; break;
1925 case SpvOpLogicalOr
: op
= nir_op_ior
; break;
1926 case SpvOpLogicalEqual
: op
= nir_op_ieq
; break;
1927 case SpvOpLogicalNotEqual
: op
= nir_op_ine
; break;
1928 case SpvOpLogicalAnd
: op
= nir_op_iand
; break;
1929 case SpvOpBitwiseOr
: op
= nir_op_ior
; break;
1930 case SpvOpBitwiseXor
: op
= nir_op_ixor
; break;
1931 case SpvOpBitwiseAnd
: op
= nir_op_iand
; break;
1932 case SpvOpSelect
: op
= nir_op_bcsel
; break;
1933 case SpvOpIEqual
: op
= nir_op_ieq
; break;
   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
   case SpvOpFOrdEqual:             op = nir_op_feq;    break;
   case SpvOpFUnordEqual:           op = nir_op_feq;    break;
   case SpvOpINotEqual:             op = nir_op_ine;    break;
   case SpvOpFOrdNotEqual:          op = nir_op_fne;    break;
   case SpvOpFUnordNotEqual:        op = nir_op_fne;    break;
   case SpvOpULessThan:             op = nir_op_ult;    break;
   case SpvOpSLessThan:             op = nir_op_ilt;    break;
   case SpvOpFOrdLessThan:          op = nir_op_flt;    break;
   case SpvOpFUnordLessThan:        op = nir_op_flt;    break;
   case SpvOpUGreaterThan:          op = nir_op_ult;    swap = true;   break;
   case SpvOpSGreaterThan:          op = nir_op_ilt;    swap = true;   break;
   case SpvOpFOrdGreaterThan:       op = nir_op_flt;    swap = true;   break;
   case SpvOpFUnordGreaterThan:     op = nir_op_flt;    swap = true;   break;
   case SpvOpULessThanEqual:        op = nir_op_uge;    swap = true;   break;
   case SpvOpSLessThanEqual:        op = nir_op_ige;    swap = true;   break;
   case SpvOpFOrdLessThanEqual:     op = nir_op_fge;    swap = true;   break;
   case SpvOpFUnordLessThanEqual:   op = nir_op_fge;    swap = true;   break;
   case SpvOpUGreaterThanEqual:     op = nir_op_uge;    break;
   case SpvOpSGreaterThanEqual:     op = nir_op_ige;    break;
   case SpvOpFOrdGreaterThanEqual:  op = nir_op_fge;    break;
   case SpvOpFUnordGreaterThanEqual: op = nir_op_fge;   break;
   /* Conversions: */
   case SpvOpConvertFToU:           op = nir_op_f2u;    break;
   case SpvOpConvertFToS:           op = nir_op_f2i;    break;
   case SpvOpConvertSToF:           op = nir_op_i2f;    break;
   case SpvOpConvertUToF:           op = nir_op_u2f;    break;
   case SpvOpBitcast:               op = nir_op_imov;   break;
   case SpvOpUConvert:
   case SpvOpSConvert:
      op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */
      break;
   case SpvOpFConvert:
      op = nir_op_fmov;
      break;
   /* Derivatives: */
   case SpvOpDPdx:         op = nir_op_fddx;          break;
   case SpvOpDPdy:         op = nir_op_fddy;          break;
   case SpvOpDPdxFine:     op = nir_op_fddx_fine;     break;
   case SpvOpDPdyFine:     op = nir_op_fddy_fine;     break;
   case SpvOpDPdxCoarse:   op = nir_op_fddx_coarse;   break;
   case SpvOpDPdyCoarse:   op = nir_op_fddy_coarse;   break;
   case SpvOpFwidth:
      /* fwidth(p) = |dFdx(p)| + |dFdy(p)|; OpFwidth takes one operand. */
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      return;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      return;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      return;
   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      return;

   case SpvOpSRem:
   case SpvOpFRem:
      unreachable("No NIR equivalent");

   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   default:
      unreachable("Unhandled opcode");
   }
   if (swap) {
      assert(num_inputs == 2);
      nir_ssa_def *tmp = src[0];
      src[0] = src[1];
      src[1] = tmp;
   }
   nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
   nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
                     glsl_get_vector_elements(type), val->name);
   instr->dest.write_mask = (1 << glsl_get_vector_elements(type)) - 1;
   val->ssa->def = &instr->dest.dest.ssa;
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
      instr->src[i].src = nir_src_for_ssa(src[i]);

   nir_builder_instr_insert(&b->nb, &instr->instr);
}
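
/* Extracts the scalar at `index` from `src` by emitting a
 * single-component swizzle.
 */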
static nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
   unsigned swiz[4] = { index };
   return nir_swizzle(&b->nb, src, swiz, 1, true);
}
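
/* Builds a new vector that is a copy of `src` with the component at
 * `index` replaced by `insert`.
 */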
static nir_ssa_def *
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
                  unsigned index)
{
   nir_alu_instr *vec = create_vec(b->shader, src->num_components);

   for (unsigned i = 0; i < src->num_components; i++) {
      if (i == index) {
         vec->src[i].src = nir_src_for_ssa(insert);
      } else {
         vec->src[i].src = nir_src_for_ssa(src);
         vec->src[i].swizzle[0] = i;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
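
/* Lowers a dynamically-indexed extract to a chain of bcsel instructions,
 * one per component, each selecting on an equality test against `index`.
 */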
static nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                           nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_extract(b, src, i), dest);

   return dest;
}
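
/* Same bcsel-chain trick as vtn_vector_extract_dynamic, but selecting
 * between candidate vectors that each have `insert` written to a
 * different component.
 */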
static nir_ssa_def *
vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
                          nir_ssa_def *insert, nir_ssa_def *index)
{
   nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
   for (unsigned i = 1; i < src->num_components; i++)
      dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
                       vtn_vector_insert(b, src, insert, i), dest);

   return dest;
}
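
/* Implements OpVectorShuffle: each destination component is taken from
 * src0 or src1 according to `indices`, with the sentinel 0xffffffff
 * producing an undefined component.
 */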
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
                   nir_ssa_def *src0, nir_ssa_def *src1,
                   const uint32_t *indices)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components);

   nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
   nir_builder_instr_insert(&b->nb, &undef->instr);

   for (unsigned i = 0; i < num_components; i++) {
      uint32_t index = indices[i];
      if (index == 0xffffffff) {
         vec->src[i].src = nir_src_for_ssa(&undef->def);
      } else if (index < src0->num_components) {
         vec->src[i].src = nir_src_for_ssa(src0);
         vec->src[i].swizzle[0] = index;
      } else {
         vec->src[i].src = nir_src_for_ssa(src1);
         vec->src[i].swizzle[0] = index - src0->num_components;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}
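
/* Recursively copies a vtn_ssa_value.  Scalars and vectors share the
 * underlying nir_ssa_def; aggregates get a freshly allocated element
 * array.
 */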
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}
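
/* Implements OpCompositeInsert: copies `src`, walks the copy using
 * `indices`, and replaces the addressed element (or vector component)
 * with `insert`.
 */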
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index is the
       * component at which to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}
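
/* Implements OpCompositeExtract by walking `indices` down the aggregate;
 * a trailing vector index is handled with vtn_vector_extract.
 */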
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down
          * to the component granularity.  The last index will be the index
          * of the component to extract from the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      } else {
         cur = cur->elems[indices[i]];
      }
   }

   return cur;
}
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;
   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;
   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }
   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}
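
/* Recursively creates empty phi instructions for a (possibly aggregate)
 * value; the phi sources are filled in later by the phi second pass.
 */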
static void
vtn_phi_node_init(struct vtn_builder *b, struct vtn_ssa_value *val)
{
   if (glsl_type_is_vector_or_scalar(val->type)) {
      nir_phi_instr *phi = nir_phi_instr_create(b->shader);
      nir_ssa_dest_init(&phi->instr, &phi->dest,
                        glsl_get_vector_elements(val->type), NULL);
      exec_list_make_empty(&phi->srcs);
      nir_builder_instr_insert(&b->nb, &phi->instr);
      val->def = &phi->dest.ssa;
   } else {
      unsigned elems = glsl_get_length(val->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_init(b, val->elems[i]);
   }
}
static struct vtn_ssa_value *
vtn_phi_node_create(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, type);
   vtn_phi_node_init(b, val);
   return val;
}

static void
vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_phi_node_create(b, type);
}
static void
vtn_phi_node_add_src(struct vtn_ssa_value *phi, const nir_block *pred,
                     struct vtn_ssa_value *val)
{
   assert(phi->type == val->type);
   if (glsl_type_is_vector_or_scalar(phi->type)) {
      nir_phi_instr *phi_instr = nir_instr_as_phi(phi->def->parent_instr);
      nir_phi_src *src = ralloc(phi_instr, nir_phi_src);
      src->pred = (nir_block *) pred;
      src->src = nir_src_for_ssa(val->def);
      exec_list_push_tail(&phi_instr->srcs, &src->node);
   } else {
      unsigned elems = glsl_get_length(phi->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_add_src(phi->elems[i], pred, val->elems[i]);
   }
}
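
/* Finds the value of a phi source coming from `block`.  If the block was
 * produced from a SPIR-V block listed in the OpPhi, its value is used
 * directly; otherwise a new phi is created in `block` and filled in
 * recursively from its predecessors.
 */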
static struct vtn_ssa_value *
vtn_get_phi_node_src(struct vtn_builder *b, nir_block *block,
                     const struct glsl_type *type, const uint32_t *w,
                     unsigned count)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->block_table, block);
   if (entry) {
      struct vtn_block *spv_block = entry->data;
      for (unsigned off = 4; off < count; off += 2) {
         if (spv_block == vtn_value(b, w[off], vtn_value_type_block)->block) {
            return vtn_ssa_value(b, w[off - 1]);
         }
      }
   }

   b->nb.cursor = nir_before_block(block);
   struct vtn_ssa_value *phi = vtn_phi_node_create(b, type);

   struct set_entry *entry2;
   set_foreach(block->predecessors, entry2) {
      nir_block *pred = (nir_block *) entry2->key;
      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return phi;
}
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel) {
      b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
      return true;
   }

   if (opcode != SpvOpPhi)
      return true;

   struct vtn_ssa_value *phi = vtn_value(b, w[2], vtn_value_type_ssa)->ssa;

   struct set_entry *entry;
   set_foreach(b->block->block->predecessors, entry) {
      nir_block *pred = (nir_block *) entry->key;
      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, phi->type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return true;
}
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSource:
   case SpvOpSourceExtension:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;
   case SpvOpCapability:
      /*
       * TODO: Properly handle capabilities and give a real error if a
       * shader asks for too much.
       */
      assert(w[1] == SpvCapabilityMatrix ||
             w[1] == SpvCapabilityShader);
      break;
   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;
   case SpvOpEntryPoint:
      assert(b->entry_point == NULL);
      b->entry_point = &b->values[w[2]];
      b->execution_model = w[1];
      break;

   case SpvOpExecutionMode:
      assert(b->entry_point == &b->values[w[1]]);

      switch ((SpvExecutionMode)w[2]) {
      case SpvExecutionModeOriginUpperLeft:
      case SpvExecutionModeOriginLowerLeft:
         b->origin_upper_left = (w[2] == SpvExecutionModeOriginUpperLeft);
         break;

      case SpvExecutionModeEarlyFragmentTests:
         b->shader->info.fs.early_fragment_tests = true;
         break;

      case SpvExecutionModeInvocations:
         b->shader->info.gs.invocations = w[3];
         break;

      case SpvExecutionModeDepthReplacing:
         b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
         break;
      case SpvExecutionModeDepthGreater:
         b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
         break;
      case SpvExecutionModeDepthLess:
         b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
         break;
      case SpvExecutionModeDepthUnchanged:
         b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
         break;

      case SpvExecutionModeLocalSize:
         b->shader->info.cs.local_size[0] = w[3];
         b->shader->info.cs.local_size[1] = w[4];
         b->shader->info.cs.local_size[2] = w[5];
         break;
      case SpvExecutionModeLocalSizeHint:
         break; /* Nothing to do with this */
      case SpvExecutionModeOutputVertices:
         b->shader->info.gs.vertices_out = w[3];
         break;

      case SpvExecutionModeInputPoints:
      case SpvExecutionModeInputLines:
      case SpvExecutionModeInputLinesAdjacency:
      case SpvExecutionModeInputTriangles:
      case SpvExecutionModeInputTrianglesAdjacency:
      case SpvExecutionModeInputQuads:
      case SpvExecutionModeInputIsolines:
      case SpvExecutionModeOutputPoints:
      case SpvExecutionModeOutputLineStrip:
      case SpvExecutionModeOutputTriangleStrip:
         assert(!"TODO: Add geometry metadata");
         break;
      case SpvExecutionModeSpacingEqual:
      case SpvExecutionModeSpacingFractionalEven:
      case SpvExecutionModeSpacingFractionalOdd:
      case SpvExecutionModeVertexOrderCw:
      case SpvExecutionModeVertexOrderCcw:
      case SpvExecutionModePointMode:
         assert(!"TODO: Add tessellation metadata");
         break;

      case SpvExecutionModePixelCenterInteger:
      case SpvExecutionModeXfb:
         assert(!"Unhandled execution mode");
         break;

      case SpvExecutionModeVecTypeHint:
      case SpvExecutionModeContractionOff:
      case SpvExecutionModeIndependentForwardProgress:
         break;
      }
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2);
      break;
   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
      break;

   case SpvOpMemberName:
      /* TODO */
      break;

   case SpvOpLine:
      break; /* Ignored for now */
   case SpvOpDecorationGroup:
   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;
   case SpvOpTypeVoid:
   case SpvOpTypeBool:
   case SpvOpTypeInt:
   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeImage:
   case SpvOpTypeSampler:
   case SpvOpTypeSampledImage:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
      vtn_handle_type(b, opcode, w, count);
      break;
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstant:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}
static bool
vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
                                      const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      const struct glsl_type *func_type =
         vtn_value(b, w[4], vtn_value_type_type)->type->type;

      assert(glsl_get_function_return_type(func_type) == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      nir_function_overload *overload = nir_function_overload_create(func);
      overload->num_params = glsl_get_length(func_type);
      overload->params = ralloc_array(overload, nir_parameter,
                                      overload->num_params);
      for (unsigned i = 0; i < overload->num_params; i++) {
         const struct glsl_function_param *param =
            glsl_get_function_param(func_type, i);
         overload->params[i].type = param->type;
         if (param->in) {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_inout;
            } else {
               overload->params[i].param_type = nir_parameter_in;
            }
         } else {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_out;
            } else {
               assert(!"Parameter is neither in nor out");
            }
         }
      }
      b->func->overload = overload;
      break;
   }
   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter:
      break; /* Does nothing */
   case SpvOpLabel: {
      assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;
   }
   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      assert(b->block);
      b->block->branch = w;
      b->block = NULL;
      break;
   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      assert(b->block && b->block->merge_op == SpvOpNop);
      b->block->merge_op = opcode;
      b->block->merge_block_id = w[1];
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel: {
      struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
      assert(block->block == NULL);

      block->block = nir_cursor_current_block(b->nb.cursor);
      break;
   }
   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef:
      vtn_push_value(b, w[2], vtn_value_type_undef);
      break;

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;
   case SpvOpVariable:
   case SpvOpLoad:
   case SpvOpStore:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
   case SpvOpImageTexelPointer:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
   case SpvOpImageQueryLod:
   case SpvOpImageQueryLevels:
   case SpvOpImageQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;
   case SpvOpSNegate:
   case SpvOpFNegate:
   case SpvOpNot:
   case SpvOpAny:
   case SpvOpAll:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpUConvert:
   case SpvOpSConvert:
   case SpvOpFConvert:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpBitcast:
   case SpvOpIsNan:
   case SpvOpIsInf:
   case SpvOpIsFinite:
   case SpvOpIsNormal:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpOrdered:
   case SpvOpUnordered:
   case SpvOpIAdd:
   case SpvOpFAdd:
   case SpvOpISub:
   case SpvOpFSub:
   case SpvOpIMul:
   case SpvOpFMul:
   case SpvOpUDiv:
   case SpvOpSDiv:
   case SpvOpFDiv:
   case SpvOpUMod:
   case SpvOpSRem:
   case SpvOpSMod:
   case SpvOpFRem:
   case SpvOpFMod:
   case SpvOpVectorTimesScalar:
   case SpvOpDot:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalOr:
   case SpvOpLogicalEqual:
   case SpvOpLogicalNotEqual:
   case SpvOpLogicalAnd:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpSelect:
   case SpvOpIEqual:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpDPdx:
   case SpvOpDPdy:
   case SpvOpDPdxFine:
   case SpvOpDPdyFine:
   case SpvOpFwidth:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
      vtn_handle_alu(b, opcode, w, count);
      break;
   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_matrix_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpPhi:
      vtn_handle_phi_first_pass(b, w);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}
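
/* Walks the structured SPIR-V CFG starting at `start`, emitting NIR
 * control flow as it goes.  `break_block` and `cont_block` are the merge
 * and continue targets of the innermost loop, and `end_block` is the
 * merge block of the innermost selection (NULL at function or loop
 * scope).
 */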
static void
vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
                struct vtn_block *break_block, struct vtn_block *cont_block,
                struct vtn_block *end_block)
{
   struct vtn_block *block = start;
   while (block != end_block) {
      if (block->merge_op == SpvOpLoopMerge) {
         /* This is the jump into a loop. */
         struct vtn_block *new_cont_block = block;
         struct vtn_block *new_break_block =
            vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

         nir_loop *loop = nir_loop_create(b->shader);
         nir_cf_node_insert(b->nb.cursor, &loop->cf_node);

         /* Reset the merge_op to prevent infinite recursion */
         block->merge_op = SpvOpNop;

         b->nb.cursor = nir_after_cf_list(&loop->body);
         vtn_walk_blocks(b, block, new_break_block, new_cont_block, NULL);

         b->nb.cursor = nir_after_cf_node(&loop->cf_node);
         block = new_break_block;
         continue;
      }
      const uint32_t *w = block->branch;
      SpvOp branch_op = w[0] & SpvOpCodeMask;

      vtn_foreach_instruction(b, block->label, block->branch,
                              vtn_handle_body_instruction);
      nir_block *cur_block = nir_cursor_current_block(b->nb.cursor);
      assert(cur_block == block->block);
      _mesa_hash_table_insert(b->block_table, cur_block, block);
      switch (branch_op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, w[1], vtn_value_type_block)->block;

         if (branch_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == end_block) {
            /* We're branching to the merge block of an if; since for loops
             * and functions end_block == NULL, we're done here.
             */
            return;
         } else {
            /* We're branching to another block, and according to the rules,
             * we can only branch to another block with one predecessor (so
             * we're the only one jumping to it) so we can just process it
             * directly.
             */
            block = branch_block;
            continue;
         }
      }
      case SpvOpBranchConditional: {
         /* Gather up the branch blocks */
         struct vtn_block *then_block =
            vtn_value(b, w[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, w[3], vtn_value_type_block)->block;

         nir_if *if_stmt = nir_if_create(b->shader);
         if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
         nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);
         if (then_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else if (then_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else {
            /* According to the rules we're branching to two blocks that don't
             * have any other predecessors, so we can handle this as a
             * conditional.
             */
            assert(block->merge_op == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

            b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
            vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);

            b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
            vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);

            b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);
            block = merge_block;
            continue;
         }
         /* If we got here then we inserted a predicated break or continue
          * above and we need to handle the other case.  We already set
          * `block` above to indicate what block to visit after the if.
          */

         /* It's possible that the other branch is also a break/continue.
          * If it is, we handle that here.
          */
         if (block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         }

         /* If we got here then there was a predicated break/continue but
          * the other half of the if has stuff in it.  `block` was already
          * set above so there is nothing left for us to do.
          */
         continue;
      }
      case SpvOpReturn: {
         nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                      nir_jump_return);
         nir_builder_instr_insert(&b->nb, &jump->instr);
         return;
      }

      case SpvOpKill: {
         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
         nir_builder_instr_insert(&b->nb, &discard->instr);
         return;
      }

      case SpvOpSwitch:
      case SpvOpReturnValue:
      case SpvOpUnreachable:
      default:
         unreachable("Unhandled opcode");
      }
   }
}
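
/* Entry point: converts a SPIR-V module into a nir_shader.  Runs the
 * preamble pass, a quick CFG pre-pass, and then a per-function body walk
 * followed by a second pass that fills in phi sources.
 */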
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             gl_shader_stage stage,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] == 99); /* version */
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0); /* schema */

   words += 5;
   nir_shader *shader = nir_shader_create(NULL, stage, options);

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->shader = shader;
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);
   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   /* Do a very quick CFG analysis pass */
   vtn_foreach_instruction(b, words, word_end,
                           vtn_handle_first_cfg_pass_instruction);
   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = nir_function_impl_create(func->overload);
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      b->block_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      nir_builder_init(&b->nb, b->impl);
      b->nb.cursor = nir_after_cf_list(&b->impl->body);
      vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
      vtn_foreach_instruction(b, func->start_block->label, func->end,
                              vtn_handle_phi_second_pass);
   }
   /* Because we can still have output reads in NIR, we need to lower
    * outputs to temporaries before we are truly finished.
    */
   nir_lower_outputs_to_temporaries(shader);

   ralloc_free(b);

   return shader;
}