2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Jason Ekstrand (jason@jlekstrand.net)
28 #include "spirv_to_nir_private.h"
31 static struct vtn_ssa_value
*
32 vtn_const_ssa_value(struct vtn_builder
*b
, nir_constant
*constant
,
33 const struct glsl_type
*type
)
35 struct hash_entry
*entry
= _mesa_hash_table_search(b
->const_table
, constant
);
40 struct vtn_ssa_value
*val
= rzalloc(b
, struct vtn_ssa_value
);
43 switch (glsl_get_base_type(type
)) {
48 case GLSL_TYPE_DOUBLE
:
49 if (glsl_type_is_vector_or_scalar(type
)) {
50 unsigned num_components
= glsl_get_vector_elements(val
->type
);
51 nir_load_const_instr
*load
=
52 nir_load_const_instr_create(b
->shader
, num_components
);
54 for (unsigned i
= 0; i
< num_components
; i
++)
55 load
->value
.u
[i
] = constant
->value
.u
[i
];
57 nir_instr_insert_before_cf_list(&b
->impl
->body
, &load
->instr
);
58 val
->def
= &load
->def
;
60 assert(glsl_type_is_matrix(type
));
61 unsigned rows
= glsl_get_vector_elements(val
->type
);
62 unsigned columns
= glsl_get_matrix_columns(val
->type
);
63 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, columns
);
65 for (unsigned i
= 0; i
< columns
; i
++) {
66 struct vtn_ssa_value
*col_val
= rzalloc(b
, struct vtn_ssa_value
);
67 col_val
->type
= glsl_get_column_type(val
->type
);
68 nir_load_const_instr
*load
=
69 nir_load_const_instr_create(b
->shader
, rows
);
71 for (unsigned j
= 0; j
< rows
; j
++)
72 load
->value
.u
[j
] = constant
->value
.u
[rows
* i
+ j
];
74 nir_instr_insert_before_cf_list(&b
->impl
->body
, &load
->instr
);
75 col_val
->def
= &load
->def
;
77 val
->elems
[i
] = col_val
;
82 case GLSL_TYPE_ARRAY
: {
83 unsigned elems
= glsl_get_length(val
->type
);
84 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
85 const struct glsl_type
*elem_type
= glsl_get_array_element(val
->type
);
86 for (unsigned i
= 0; i
< elems
; i
++)
87 val
->elems
[i
] = vtn_const_ssa_value(b
, constant
->elements
[i
],
92 case GLSL_TYPE_STRUCT
: {
93 unsigned elems
= glsl_get_length(val
->type
);
94 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
95 for (unsigned i
= 0; i
< elems
; i
++) {
96 const struct glsl_type
*elem_type
=
97 glsl_get_struct_field(val
->type
, i
);
98 val
->elems
[i
] = vtn_const_ssa_value(b
, constant
->elements
[i
],
105 unreachable("bad constant type");
111 struct vtn_ssa_value
*
112 vtn_ssa_value(struct vtn_builder
*b
, uint32_t value_id
)
114 struct vtn_value
*val
= vtn_untyped_value(b
, value_id
);
115 switch (val
->value_type
) {
116 case vtn_value_type_constant
:
117 return vtn_const_ssa_value(b
, val
->constant
, val
->const_type
);
119 case vtn_value_type_ssa
:
122 unreachable("Invalid type for an SSA value");
127 vtn_string_literal(struct vtn_builder
*b
, const uint32_t *words
,
130 return ralloc_strndup(b
, (char *)words
, word_count
* sizeof(*words
));
133 static const uint32_t *
134 vtn_foreach_instruction(struct vtn_builder
*b
, const uint32_t *start
,
135 const uint32_t *end
, vtn_instruction_handler handler
)
137 const uint32_t *w
= start
;
139 SpvOp opcode
= w
[0] & SpvOpCodeMask
;
140 unsigned count
= w
[0] >> SpvWordCountShift
;
141 assert(count
>= 1 && w
+ count
<= end
);
143 if (!handler(b
, opcode
, w
, count
))
153 vtn_handle_extension(struct vtn_builder
*b
, SpvOp opcode
,
154 const uint32_t *w
, unsigned count
)
157 case SpvOpExtInstImport
: {
158 struct vtn_value
*val
= vtn_push_value(b
, w
[1], vtn_value_type_extension
);
159 if (strcmp((const char *)&w
[2], "GLSL.std.450") == 0) {
160 val
->ext_handler
= vtn_handle_glsl450_instruction
;
162 assert(!"Unsupported extension");
168 struct vtn_value
*val
= vtn_value(b
, w
[3], vtn_value_type_extension
);
169 bool handled
= val
->ext_handler(b
, w
[4], w
, count
);
176 unreachable("Unhandled opcode");
181 _foreach_decoration_helper(struct vtn_builder
*b
,
182 struct vtn_value
*base_value
,
184 struct vtn_value
*value
,
185 vtn_decoration_foreach_cb cb
, void *data
)
187 int new_member
= member
;
189 for (struct vtn_decoration
*dec
= value
->decoration
; dec
; dec
= dec
->next
) {
190 if (dec
->member
>= 0) {
191 assert(member
== -1);
192 new_member
= dec
->member
;
196 assert(dec
->group
->value_type
== vtn_value_type_decoration_group
);
197 _foreach_decoration_helper(b
, base_value
, new_member
, dec
->group
,
200 cb(b
, base_value
, new_member
, dec
, data
);
205 /** Iterates (recursively if needed) over all of the decorations on a value
207 * This function iterates over all of the decorations applied to a given
208 * value. If it encounters a decoration group, it recurses into the group
209 * and iterates over all of those decorations as well.
212 vtn_foreach_decoration(struct vtn_builder
*b
, struct vtn_value
*value
,
213 vtn_decoration_foreach_cb cb
, void *data
)
215 _foreach_decoration_helper(b
, value
, -1, value
, cb
, data
);
219 vtn_handle_decoration(struct vtn_builder
*b
, SpvOp opcode
,
220 const uint32_t *w
, unsigned count
)
222 const uint32_t *w_end
= w
+ count
;
223 const uint32_t target
= w
[1];
228 case SpvOpDecorationGroup
:
229 vtn_push_value(b
, target
, vtn_value_type_undef
);
232 case SpvOpMemberDecorate
:
235 case SpvOpDecorate
: {
236 struct vtn_value
*val
= &b
->values
[target
];
238 struct vtn_decoration
*dec
= rzalloc(b
, struct vtn_decoration
);
239 dec
->member
= member
;
240 dec
->decoration
= *(w
++);
243 /* Link into the list */
244 dec
->next
= val
->decoration
;
245 val
->decoration
= dec
;
249 case SpvOpGroupMemberDecorate
:
252 case SpvOpGroupDecorate
: {
253 struct vtn_value
*group
= &b
->values
[target
];
254 assert(group
->value_type
== vtn_value_type_decoration_group
);
256 for (; w
< w_end
; w
++) {
257 struct vtn_value
*val
= &b
->values
[*w
];
258 struct vtn_decoration
*dec
= rzalloc(b
, struct vtn_decoration
);
259 dec
->member
= member
;
262 /* Link into the list */
263 dec
->next
= val
->decoration
;
264 val
->decoration
= dec
;
270 unreachable("Unhandled opcode");
274 struct member_decoration_ctx
{
275 struct glsl_struct_field
*fields
;
276 struct vtn_type
*type
;
279 /* does a shallow copy of a vtn_type */
281 static struct vtn_type
*
282 vtn_type_copy(struct vtn_builder
*b
, struct vtn_type
*src
)
284 struct vtn_type
*dest
= ralloc(b
, struct vtn_type
);
285 dest
->type
= src
->type
;
286 dest
->is_builtin
= src
->is_builtin
;
288 dest
->builtin
= src
->builtin
;
290 if (!glsl_type_is_vector_or_scalar(src
->type
)) {
291 switch (glsl_get_base_type(src
->type
)) {
292 case GLSL_TYPE_ARRAY
:
293 dest
->array_element
= src
->array_element
;
294 dest
->stride
= src
->stride
;
300 case GLSL_TYPE_FLOAT
:
301 case GLSL_TYPE_DOUBLE
:
303 dest
->row_major
= src
->row_major
;
304 dest
->stride
= src
->stride
;
307 case GLSL_TYPE_STRUCT
: {
308 unsigned elems
= glsl_get_length(src
->type
);
310 dest
->members
= ralloc_array(b
, struct vtn_type
*, elems
);
311 memcpy(dest
->members
, src
->members
, elems
* sizeof(struct vtn_type
*));
313 dest
->offsets
= ralloc_array(b
, unsigned, elems
);
314 memcpy(dest
->offsets
, src
->offsets
, elems
* sizeof(unsigned));
319 unreachable("unhandled type");
327 struct_member_decoration_cb(struct vtn_builder
*b
,
328 struct vtn_value
*val
, int member
,
329 const struct vtn_decoration
*dec
, void *void_ctx
)
331 struct member_decoration_ctx
*ctx
= void_ctx
;
336 switch (dec
->decoration
) {
337 case SpvDecorationPrecisionLow
:
338 case SpvDecorationPrecisionMedium
:
339 case SpvDecorationPrecisionHigh
:
340 break; /* FIXME: Do nothing with these for now. */
341 case SpvDecorationSmooth
:
342 ctx
->fields
[member
].interpolation
= INTERP_QUALIFIER_SMOOTH
;
344 case SpvDecorationNoperspective
:
345 ctx
->fields
[member
].interpolation
= INTERP_QUALIFIER_NOPERSPECTIVE
;
347 case SpvDecorationFlat
:
348 ctx
->fields
[member
].interpolation
= INTERP_QUALIFIER_FLAT
;
350 case SpvDecorationCentroid
:
351 ctx
->fields
[member
].centroid
= true;
353 case SpvDecorationSample
:
354 ctx
->fields
[member
].sample
= true;
356 case SpvDecorationLocation
:
357 ctx
->fields
[member
].location
= dec
->literals
[0];
359 case SpvDecorationBuiltIn
:
360 ctx
->type
->members
[member
] = vtn_type_copy(b
,
361 ctx
->type
->members
[member
]);
362 ctx
->type
->members
[member
]->is_builtin
= true;
363 ctx
->type
->members
[member
]->builtin
= dec
->literals
[0];
366 unreachable("Unhandled member decoration");
371 vtn_handle_type(struct vtn_builder
*b
, SpvOp opcode
,
372 const uint32_t *w
, unsigned count
)
374 struct vtn_value
*val
= vtn_push_value(b
, w
[1], vtn_value_type_type
);
376 val
->type
= ralloc(b
, struct vtn_type
);
377 val
->type
->is_builtin
= false;
381 val
->type
->type
= glsl_void_type();
384 val
->type
->type
= glsl_bool_type();
387 val
->type
->type
= glsl_int_type();
390 val
->type
->type
= glsl_float_type();
393 case SpvOpTypeVector
: {
394 const struct glsl_type
*base
=
395 vtn_value(b
, w
[2], vtn_value_type_type
)->type
->type
;
396 unsigned elems
= w
[3];
398 assert(glsl_type_is_scalar(base
));
399 val
->type
->type
= glsl_vector_type(glsl_get_base_type(base
), elems
);
403 case SpvOpTypeMatrix
: {
404 struct vtn_type
*base
=
405 vtn_value(b
, w
[2], vtn_value_type_type
)->type
;
406 unsigned columns
= w
[3];
408 assert(glsl_type_is_vector(base
->type
));
409 val
->type
->type
= glsl_matrix_type(glsl_get_base_type(base
->type
),
410 glsl_get_vector_elements(base
->type
),
412 val
->type
->array_element
= base
;
413 val
->type
->row_major
= false;
414 val
->type
->stride
= 0;
418 case SpvOpTypeArray
: {
419 struct vtn_type
*array_element
=
420 vtn_value(b
, w
[2], vtn_value_type_type
)->type
;
421 val
->type
->type
= glsl_array_type(array_element
->type
, w
[3]);
422 val
->type
->array_element
= array_element
;
423 val
->type
->stride
= 0;
427 case SpvOpTypeStruct
: {
428 unsigned num_fields
= count
- 2;
429 val
->type
->members
= ralloc_array(b
, struct vtn_type
*, num_fields
);
431 NIR_VLA(struct glsl_struct_field
, fields
, count
);
432 for (unsigned i
= 0; i
< num_fields
; i
++) {
433 /* TODO: Handle decorators */
434 val
->type
->members
[i
] =
435 vtn_value(b
, w
[i
+ 2], vtn_value_type_type
)->type
;
436 fields
[i
].type
= val
->type
->members
[i
]->type
;
437 fields
[i
].name
= ralloc_asprintf(b
, "field%d", i
);
438 fields
[i
].location
= -1;
439 fields
[i
].interpolation
= 0;
440 fields
[i
].centroid
= 0;
441 fields
[i
].sample
= 0;
442 fields
[i
].matrix_layout
= 2;
443 fields
[i
].stream
= -1;
446 struct member_decoration_ctx ctx
= {
451 vtn_foreach_decoration(b
, val
, struct_member_decoration_cb
, &ctx
);
453 const char *name
= val
->name
? val
->name
: "struct";
455 val
->type
->type
= glsl_struct_type(fields
, num_fields
, name
);
459 case SpvOpTypeFunction
: {
460 const struct glsl_type
*return_type
=
461 vtn_value(b
, w
[2], vtn_value_type_type
)->type
->type
;
462 NIR_VLA(struct glsl_function_param
, params
, count
- 3);
463 for (unsigned i
= 0; i
< count
- 3; i
++) {
464 params
[i
].type
= vtn_value(b
, w
[i
+ 3], vtn_value_type_type
)->type
->type
;
468 params
[i
].out
= true;
470 val
->type
->type
= glsl_function_type(return_type
, params
, count
- 3);
474 case SpvOpTypePointer
:
475 /* FIXME: For now, we'll just do the really lame thing and return
476 * the same type. The validator should ensure that the proper number
477 * of dereferences happen
479 val
->type
= vtn_value(b
, w
[3], vtn_value_type_type
)->type
;
482 case SpvOpTypeSampler
: {
483 const struct glsl_type
*sampled_type
=
484 vtn_value(b
, w
[2], vtn_value_type_type
)->type
->type
;
486 assert(glsl_type_is_vector_or_scalar(sampled_type
));
488 enum glsl_sampler_dim dim
;
489 switch ((SpvDim
)w
[3]) {
490 case SpvDim1D
: dim
= GLSL_SAMPLER_DIM_1D
; break;
491 case SpvDim2D
: dim
= GLSL_SAMPLER_DIM_2D
; break;
492 case SpvDim3D
: dim
= GLSL_SAMPLER_DIM_3D
; break;
493 case SpvDimCube
: dim
= GLSL_SAMPLER_DIM_CUBE
; break;
494 case SpvDimRect
: dim
= GLSL_SAMPLER_DIM_RECT
; break;
495 case SpvDimBuffer
: dim
= GLSL_SAMPLER_DIM_BUF
; break;
497 unreachable("Invalid SPIR-V Sampler dimension");
500 /* TODO: Handle the various texture image/filter options */
503 bool is_array
= w
[5];
504 bool is_shadow
= w
[6];
506 assert(w
[7] == 0 && "FIXME: Handl multi-sampled textures");
508 val
->type
->type
= glsl_sampler_type(dim
, is_shadow
, is_array
,
509 glsl_get_base_type(sampled_type
));
513 case SpvOpTypeRuntimeArray
:
514 case SpvOpTypeOpaque
:
516 case SpvOpTypeDeviceEvent
:
517 case SpvOpTypeReserveId
:
521 unreachable("Unhandled opcode");
526 vtn_handle_constant(struct vtn_builder
*b
, SpvOp opcode
,
527 const uint32_t *w
, unsigned count
)
529 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_constant
);
530 val
->const_type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
531 val
->constant
= ralloc(b
, nir_constant
);
533 case SpvOpConstantTrue
:
534 assert(val
->const_type
== glsl_bool_type());
535 val
->constant
->value
.u
[0] = NIR_TRUE
;
537 case SpvOpConstantFalse
:
538 assert(val
->const_type
== glsl_bool_type());
539 val
->constant
->value
.u
[0] = NIR_FALSE
;
542 assert(glsl_type_is_scalar(val
->const_type
));
543 val
->constant
->value
.u
[0] = w
[3];
545 case SpvOpConstantComposite
: {
546 unsigned elem_count
= count
- 3;
547 nir_constant
**elems
= ralloc_array(b
, nir_constant
*, elem_count
);
548 for (unsigned i
= 0; i
< elem_count
; i
++)
549 elems
[i
] = vtn_value(b
, w
[i
+ 3], vtn_value_type_constant
)->constant
;
551 switch (glsl_get_base_type(val
->const_type
)) {
554 case GLSL_TYPE_FLOAT
:
556 if (glsl_type_is_matrix(val
->const_type
)) {
557 unsigned rows
= glsl_get_vector_elements(val
->const_type
);
558 assert(glsl_get_matrix_columns(val
->const_type
) == elem_count
);
559 for (unsigned i
= 0; i
< elem_count
; i
++)
560 for (unsigned j
= 0; j
< rows
; j
++)
561 val
->constant
->value
.u
[rows
* i
+ j
] = elems
[i
]->value
.u
[j
];
563 assert(glsl_type_is_vector(val
->const_type
));
564 assert(glsl_get_vector_elements(val
->const_type
) == elem_count
);
565 for (unsigned i
= 0; i
< elem_count
; i
++)
566 val
->constant
->value
.u
[i
] = elems
[i
]->value
.u
[0];
571 case GLSL_TYPE_STRUCT
:
572 case GLSL_TYPE_ARRAY
:
573 ralloc_steal(val
->constant
, elems
);
574 val
->constant
->elements
= elems
;
578 unreachable("Unsupported type for constants");
584 unreachable("Unhandled opcode");
589 vtn_get_builtin_location(SpvBuiltIn builtin
, int *location
,
590 nir_variable_mode
*mode
)
593 case SpvBuiltInPosition
:
594 *location
= VARYING_SLOT_POS
;
595 *mode
= nir_var_shader_out
;
597 case SpvBuiltInPointSize
:
598 *location
= VARYING_SLOT_PSIZ
;
599 *mode
= nir_var_shader_out
;
601 case SpvBuiltInClipVertex
:
602 *location
= VARYING_SLOT_CLIP_VERTEX
;
603 *mode
= nir_var_shader_out
;
605 case SpvBuiltInClipDistance
:
606 *location
= VARYING_SLOT_CLIP_DIST0
; /* XXX CLIP_DIST1? */
607 *mode
= nir_var_shader_in
;
609 case SpvBuiltInCullDistance
:
610 /* XXX figure this out */
611 unreachable("unhandled builtin");
612 case SpvBuiltInVertexId
:
613 *location
= SYSTEM_VALUE_VERTEX_ID
;
614 *mode
= nir_var_system_value
;
616 case SpvBuiltInInstanceId
:
617 *location
= SYSTEM_VALUE_INSTANCE_ID
;
618 *mode
= nir_var_system_value
;
620 case SpvBuiltInPrimitiveId
:
621 *location
= VARYING_SLOT_PRIMITIVE_ID
;
622 *mode
= nir_var_shader_out
;
624 case SpvBuiltInInvocationId
:
625 *location
= SYSTEM_VALUE_INVOCATION_ID
;
626 *mode
= nir_var_system_value
;
628 case SpvBuiltInLayer
:
629 *location
= VARYING_SLOT_LAYER
;
630 *mode
= nir_var_shader_out
;
632 case SpvBuiltInTessLevelOuter
:
633 case SpvBuiltInTessLevelInner
:
634 case SpvBuiltInTessCoord
:
635 case SpvBuiltInPatchVertices
:
636 unreachable("no tessellation support");
637 case SpvBuiltInFragCoord
:
638 *location
= VARYING_SLOT_POS
;
639 *mode
= nir_var_shader_in
;
641 case SpvBuiltInPointCoord
:
642 *location
= VARYING_SLOT_PNTC
;
643 *mode
= nir_var_shader_out
;
645 case SpvBuiltInFrontFacing
:
646 *location
= VARYING_SLOT_FACE
;
647 *mode
= nir_var_shader_out
;
649 case SpvBuiltInSampleId
:
650 *location
= SYSTEM_VALUE_SAMPLE_ID
;
651 *mode
= nir_var_shader_in
;
653 case SpvBuiltInSamplePosition
:
654 *location
= SYSTEM_VALUE_SAMPLE_POS
;
655 *mode
= nir_var_shader_in
;
657 case SpvBuiltInSampleMask
:
658 *location
= SYSTEM_VALUE_SAMPLE_MASK_IN
; /* XXX out? */
659 *mode
= nir_var_shader_in
;
661 case SpvBuiltInFragColor
:
662 *location
= FRAG_RESULT_COLOR
;
663 *mode
= nir_var_shader_out
;
665 case SpvBuiltInFragDepth
:
666 *location
= FRAG_RESULT_DEPTH
;
667 *mode
= nir_var_shader_out
;
669 case SpvBuiltInHelperInvocation
:
670 unreachable("unsupported builtin"); /* XXX */
672 case SpvBuiltInNumWorkgroups
:
673 case SpvBuiltInWorkgroupSize
:
674 /* these are constants, need to be handled specially */
675 unreachable("unsupported builtin");
676 case SpvBuiltInWorkgroupId
:
677 case SpvBuiltInLocalInvocationId
:
678 case SpvBuiltInGlobalInvocationId
:
679 case SpvBuiltInLocalInvocationIndex
:
680 unreachable("no compute shader support");
682 unreachable("unsupported builtin");
687 var_decoration_cb(struct vtn_builder
*b
, struct vtn_value
*val
, int member
,
688 const struct vtn_decoration
*dec
, void *void_var
)
690 assert(val
->value_type
== vtn_value_type_deref
);
691 assert(val
->deref
->deref
.child
== NULL
);
692 assert(val
->deref
->var
== void_var
);
694 nir_variable
*var
= void_var
;
695 switch (dec
->decoration
) {
696 case SpvDecorationPrecisionLow
:
697 case SpvDecorationPrecisionMedium
:
698 case SpvDecorationPrecisionHigh
:
699 break; /* FIXME: Do nothing with these for now. */
700 case SpvDecorationSmooth
:
701 var
->data
.interpolation
= INTERP_QUALIFIER_SMOOTH
;
703 case SpvDecorationNoperspective
:
704 var
->data
.interpolation
= INTERP_QUALIFIER_NOPERSPECTIVE
;
706 case SpvDecorationFlat
:
707 var
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
709 case SpvDecorationCentroid
:
710 var
->data
.centroid
= true;
712 case SpvDecorationSample
:
713 var
->data
.sample
= true;
715 case SpvDecorationInvariant
:
716 var
->data
.invariant
= true;
718 case SpvDecorationConstant
:
719 assert(var
->constant_initializer
!= NULL
);
720 var
->data
.read_only
= true;
722 case SpvDecorationNonwritable
:
723 var
->data
.read_only
= true;
725 case SpvDecorationLocation
:
726 var
->data
.explicit_location
= true;
727 var
->data
.location
= dec
->literals
[0];
729 case SpvDecorationComponent
:
730 var
->data
.location_frac
= dec
->literals
[0];
732 case SpvDecorationIndex
:
733 var
->data
.explicit_index
= true;
734 var
->data
.index
= dec
->literals
[0];
736 case SpvDecorationBinding
:
737 var
->data
.explicit_binding
= true;
738 var
->data
.binding
= dec
->literals
[0];
740 case SpvDecorationDescriptorSet
:
741 var
->data
.descriptor_set
= dec
->literals
[0];
743 case SpvDecorationBuiltIn
: {
744 nir_variable_mode mode
;
745 vtn_get_builtin_location(dec
->literals
[0], &var
->data
.location
,
747 var
->data
.mode
= mode
;
748 if (mode
== nir_var_shader_in
|| mode
== nir_var_system_value
)
749 var
->data
.read_only
= true;
750 b
->builtins
[dec
->literals
[0]] = var
;
753 case SpvDecorationNoStaticUse
:
754 /* This can safely be ignored */
756 case SpvDecorationBlock
:
757 case SpvDecorationBufferBlock
:
758 case SpvDecorationRowMajor
:
759 case SpvDecorationColMajor
:
760 case SpvDecorationGLSLShared
:
761 case SpvDecorationGLSLStd140
:
762 case SpvDecorationGLSLStd430
:
763 case SpvDecorationGLSLPacked
:
764 case SpvDecorationPatch
:
765 case SpvDecorationRestrict
:
766 case SpvDecorationAliased
:
767 case SpvDecorationVolatile
:
768 case SpvDecorationCoherent
:
769 case SpvDecorationNonreadable
:
770 case SpvDecorationUniform
:
771 /* This is really nice but we have no use for it right now. */
772 case SpvDecorationCPacked
:
773 case SpvDecorationSaturatedConversion
:
774 case SpvDecorationStream
:
775 case SpvDecorationOffset
:
776 case SpvDecorationAlignment
:
777 case SpvDecorationXfbBuffer
:
778 case SpvDecorationStride
:
779 case SpvDecorationFuncParamAttr
:
780 case SpvDecorationFPRoundingMode
:
781 case SpvDecorationFPFastMathMode
:
782 case SpvDecorationLinkageAttributes
:
783 case SpvDecorationSpecId
:
786 unreachable("Unhandled variable decoration");
790 static nir_variable
*
791 get_builtin_variable(struct vtn_builder
*b
,
792 const struct glsl_type
*type
,
795 nir_variable
*var
= b
->builtins
[builtin
];
798 var
= ralloc(b
->shader
, nir_variable
);
801 nir_variable_mode mode
;
802 vtn_get_builtin_location(builtin
, &var
->data
.location
, &mode
);
803 var
->data
.mode
= mode
;
804 var
->name
= ralloc_strdup(b
->shader
, "builtin");
807 case nir_var_shader_in
:
808 exec_list_push_tail(&b
->shader
->inputs
, &var
->node
);
810 case nir_var_shader_out
:
811 exec_list_push_tail(&b
->shader
->outputs
, &var
->node
);
813 case nir_var_system_value
:
814 exec_list_push_tail(&b
->shader
->system_values
, &var
->node
);
817 unreachable("bad builtin mode");
820 b
->builtins
[builtin
] = var
;
827 vtn_builtin_load(struct vtn_builder
*b
,
828 struct vtn_ssa_value
*val
,
831 assert(glsl_type_is_vector_or_scalar(val
->type
));
833 nir_variable
*var
= get_builtin_variable(b
, val
->type
, builtin
);
835 nir_intrinsic_instr
*load
=
836 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_var
);
837 nir_ssa_dest_init(&load
->instr
, &load
->dest
,
838 glsl_get_vector_elements(val
->type
), NULL
);
840 load
->variables
[0] = nir_deref_var_create(load
, var
);
841 load
->num_components
= glsl_get_vector_elements(val
->type
);
842 nir_builder_instr_insert(&b
->nb
, &load
->instr
);
843 val
->def
= &load
->dest
.ssa
;
847 vtn_builtin_store(struct vtn_builder
*b
,
848 struct vtn_ssa_value
*val
,
851 assert(glsl_type_is_vector_or_scalar(val
->type
));
853 nir_variable
*var
= get_builtin_variable(b
, val
->type
, builtin
);
855 nir_intrinsic_instr
*store
=
856 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_var
);
858 store
->variables
[0] = nir_deref_var_create(store
, var
);
859 store
->num_components
= glsl_get_vector_elements(val
->type
);
860 store
->src
[0] = nir_src_for_ssa(val
->def
);
861 nir_builder_instr_insert(&b
->nb
, &store
->instr
);
864 static struct vtn_ssa_value
*
865 _vtn_variable_load(struct vtn_builder
*b
,
866 nir_deref_var
*src_deref
, struct vtn_type
*src_type
,
867 nir_deref
*src_deref_tail
)
869 struct vtn_ssa_value
*val
= rzalloc(b
, struct vtn_ssa_value
);
870 val
->type
= src_deref_tail
->type
;
872 if (src_type
->is_builtin
) {
873 vtn_builtin_load(b
, val
, src_type
->builtin
);
877 /* The deref tail may contain a deref to select a component of a vector (in
878 * other words, it might not be an actual tail) so we have to save it away
879 * here since we overwrite it later.
881 nir_deref
*old_child
= src_deref_tail
->child
;
883 if (glsl_type_is_vector_or_scalar(val
->type
)) {
884 nir_intrinsic_instr
*load
=
885 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_var
);
887 nir_deref_as_var(nir_copy_deref(load
, &src_deref
->deref
));
888 load
->num_components
= glsl_get_vector_elements(val
->type
);
889 nir_ssa_dest_init(&load
->instr
, &load
->dest
, load
->num_components
, NULL
);
891 nir_builder_instr_insert(&b
->nb
, &load
->instr
);
893 if (src_deref
->var
->data
.mode
== nir_var_uniform
&&
894 glsl_get_base_type(val
->type
) == GLSL_TYPE_BOOL
) {
895 /* Uniform boolean loads need to be fixed up since they're defined
896 * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
898 val
->def
= nir_ine(&b
->nb
, &load
->dest
.ssa
, nir_imm_int(&b
->nb
, 0));
900 val
->def
= &load
->dest
.ssa
;
902 } else if (glsl_get_base_type(val
->type
) == GLSL_TYPE_ARRAY
||
903 glsl_type_is_matrix(val
->type
)) {
904 unsigned elems
= glsl_get_length(val
->type
);
905 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
907 nir_deref_array
*deref
= nir_deref_array_create(b
);
908 deref
->deref_array_type
= nir_deref_array_type_direct
;
909 deref
->deref
.type
= glsl_get_array_element(val
->type
);
910 src_deref_tail
->child
= &deref
->deref
;
911 for (unsigned i
= 0; i
< elems
; i
++) {
912 deref
->base_offset
= i
;
913 val
->elems
[i
] = _vtn_variable_load(b
, src_deref
,
914 src_type
->array_element
,
918 assert(glsl_get_base_type(val
->type
) == GLSL_TYPE_STRUCT
);
919 unsigned elems
= glsl_get_length(val
->type
);
920 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
922 nir_deref_struct
*deref
= nir_deref_struct_create(b
, 0);
923 src_deref_tail
->child
= &deref
->deref
;
924 for (unsigned i
= 0; i
< elems
; i
++) {
926 deref
->deref
.type
= glsl_get_struct_field(val
->type
, i
);
927 val
->elems
[i
] = _vtn_variable_load(b
, src_deref
,
928 src_type
->members
[i
],
933 src_deref_tail
->child
= old_child
;
939 _vtn_variable_store(struct vtn_builder
*b
, struct vtn_type
*dest_type
,
940 nir_deref_var
*dest_deref
, nir_deref
*dest_deref_tail
,
941 struct vtn_ssa_value
*src
)
943 if (dest_type
->is_builtin
) {
944 vtn_builtin_store(b
, src
, dest_type
->builtin
);
948 nir_deref
*old_child
= dest_deref_tail
->child
;
950 if (glsl_type_is_vector_or_scalar(src
->type
)) {
951 nir_intrinsic_instr
*store
=
952 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_store_var
);
953 store
->variables
[0] =
954 nir_deref_as_var(nir_copy_deref(store
, &dest_deref
->deref
));
955 store
->num_components
= glsl_get_vector_elements(src
->type
);
956 store
->src
[0] = nir_src_for_ssa(src
->def
);
958 nir_builder_instr_insert(&b
->nb
, &store
->instr
);
959 } else if (glsl_get_base_type(src
->type
) == GLSL_TYPE_ARRAY
||
960 glsl_type_is_matrix(src
->type
)) {
961 unsigned elems
= glsl_get_length(src
->type
);
963 nir_deref_array
*deref
= nir_deref_array_create(b
);
964 deref
->deref_array_type
= nir_deref_array_type_direct
;
965 deref
->deref
.type
= glsl_get_array_element(src
->type
);
966 dest_deref_tail
->child
= &deref
->deref
;
967 for (unsigned i
= 0; i
< elems
; i
++) {
968 deref
->base_offset
= i
;
969 _vtn_variable_store(b
, dest_type
->array_element
, dest_deref
,
970 &deref
->deref
, src
->elems
[i
]);
973 assert(glsl_get_base_type(src
->type
) == GLSL_TYPE_STRUCT
);
974 unsigned elems
= glsl_get_length(src
->type
);
976 nir_deref_struct
*deref
= nir_deref_struct_create(b
, 0);
977 dest_deref_tail
->child
= &deref
->deref
;
978 for (unsigned i
= 0; i
< elems
; i
++) {
980 deref
->deref
.type
= glsl_get_struct_field(src
->type
, i
);
981 _vtn_variable_store(b
, dest_type
->members
[i
], dest_deref
,
982 &deref
->deref
, src
->elems
[i
]);
986 dest_deref_tail
->child
= old_child
;
990 * Gets the NIR-level deref tail, which may have as a child an array deref
991 * selecting which component due to OpAccessChain supporting per-component
992 * indexing in SPIR-V.
996 get_deref_tail(nir_deref_var
*deref
)
998 nir_deref
*cur
= &deref
->deref
;
999 while (!glsl_type_is_vector_or_scalar(cur
->type
) && cur
->child
)
1005 static nir_ssa_def
*vtn_vector_extract(struct vtn_builder
*b
,
1006 nir_ssa_def
*src
, unsigned index
);
1008 static nir_ssa_def
*vtn_vector_extract_dynamic(struct vtn_builder
*b
,
1010 nir_ssa_def
*index
);
1012 static struct vtn_ssa_value
*
1013 vtn_variable_load(struct vtn_builder
*b
, nir_deref_var
*src
,
1014 struct vtn_type
*src_type
)
1016 nir_deref
*src_tail
= get_deref_tail(src
);
1017 struct vtn_ssa_value
*val
= _vtn_variable_load(b
, src
, src_type
, src_tail
);
1019 if (src_tail
->child
) {
1020 nir_deref_array
*vec_deref
= nir_deref_as_array(src_tail
->child
);
1021 assert(vec_deref
->deref
.child
== NULL
);
1022 val
->type
= vec_deref
->deref
.type
;
1023 if (vec_deref
->deref_array_type
== nir_deref_array_type_direct
)
1024 val
->def
= vtn_vector_extract(b
, val
->def
, vec_deref
->base_offset
);
1026 val
->def
= vtn_vector_extract_dynamic(b
, val
->def
,
1027 vec_deref
->indirect
.ssa
);
1033 static nir_ssa_def
* vtn_vector_insert(struct vtn_builder
*b
,
1034 nir_ssa_def
*src
, nir_ssa_def
*insert
,
1037 static nir_ssa_def
* vtn_vector_insert_dynamic(struct vtn_builder
*b
,
1039 nir_ssa_def
*insert
,
1040 nir_ssa_def
*index
);
1042 vtn_variable_store(struct vtn_builder
*b
, struct vtn_ssa_value
*src
,
1043 nir_deref_var
*dest
, struct vtn_type
*dest_type
)
1045 nir_deref
*dest_tail
= get_deref_tail(dest
);
1046 if (dest_tail
->child
) {
1047 struct vtn_ssa_value
*val
= _vtn_variable_load(b
, dest
, dest_type
,
1049 nir_deref_array
*deref
= nir_deref_as_array(dest_tail
->child
);
1050 assert(deref
->deref
.child
== NULL
);
1051 if (deref
->deref_array_type
== nir_deref_array_type_direct
)
1052 val
->def
= vtn_vector_insert(b
, val
->def
, src
->def
,
1053 deref
->base_offset
);
1055 val
->def
= vtn_vector_insert_dynamic(b
, val
->def
, src
->def
,
1056 deref
->indirect
.ssa
);
1057 _vtn_variable_store(b
, dest_type
, dest
, dest_tail
, val
);
1059 _vtn_variable_store(b
, dest_type
, dest
, dest_tail
, src
);
1064 vtn_variable_copy(struct vtn_builder
*b
, nir_deref_var
*src
,
1065 nir_deref_var
*dest
, struct vtn_type
*type
)
1067 nir_deref
*src_tail
= get_deref_tail(src
);
1069 if (src_tail
->child
) {
1070 assert(get_deref_tail(dest
)->child
);
1071 struct vtn_ssa_value
*val
= vtn_variable_load(b
, src
, type
);
1072 vtn_variable_store(b
, val
, dest
, type
);
1074 nir_intrinsic_instr
*copy
=
1075 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_copy_var
);
1076 copy
->variables
[0] = nir_deref_as_var(nir_copy_deref(copy
, &dest
->deref
));
1077 copy
->variables
[1] = nir_deref_as_var(nir_copy_deref(copy
, &src
->deref
));
1079 nir_builder_instr_insert(&b
->nb
, ©
->instr
);
1084 vtn_handle_variables(struct vtn_builder
*b
, SpvOp opcode
,
1085 const uint32_t *w
, unsigned count
)
1088 case SpvOpVariable
: {
1089 struct vtn_type
*type
=
1090 vtn_value(b
, w
[1], vtn_value_type_type
)->type
;
1091 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_deref
);
1093 nir_variable
*var
= ralloc(b
->shader
, nir_variable
);
1095 var
->type
= type
->type
;
1096 var
->name
= ralloc_strdup(var
, val
->name
);
1098 switch ((SpvStorageClass
)w
[3]) {
1099 case SpvStorageClassUniform
:
1100 case SpvStorageClassUniformConstant
:
1101 var
->data
.mode
= nir_var_uniform
;
1102 var
->data
.read_only
= true;
1103 var
->interface_type
= type
->type
;
1105 case SpvStorageClassInput
:
1106 var
->data
.mode
= nir_var_shader_in
;
1107 var
->data
.read_only
= true;
1109 case SpvStorageClassOutput
:
1110 var
->data
.mode
= nir_var_shader_out
;
1112 case SpvStorageClassPrivateGlobal
:
1113 var
->data
.mode
= nir_var_global
;
1115 case SpvStorageClassFunction
:
1116 var
->data
.mode
= nir_var_local
;
1118 case SpvStorageClassWorkgroupLocal
:
1119 case SpvStorageClassWorkgroupGlobal
:
1120 case SpvStorageClassGeneric
:
1121 case SpvStorageClassPrivate
:
1122 case SpvStorageClassAtomicCounter
:
1124 unreachable("Unhandled variable storage class");
1129 var
->constant_initializer
=
1130 vtn_value(b
, w
[4], vtn_value_type_constant
)->constant
;
1133 val
->deref
= nir_deref_var_create(b
, var
);
1134 val
->deref_type
= type
;
1136 vtn_foreach_decoration(b
, val
, var_decoration_cb
, var
);
1138 if (b
->execution_model
== SpvExecutionModelFragment
&&
1139 var
->data
.mode
== nir_var_shader_out
) {
1140 var
->data
.location
+= FRAG_RESULT_DATA0
;
1141 } else if (b
->execution_model
== SpvExecutionModelVertex
&&
1142 var
->data
.mode
== nir_var_shader_in
) {
1143 var
->data
.location
+= VERT_ATTRIB_GENERIC0
;
1144 } else if (var
->data
.mode
== nir_var_shader_in
||
1145 var
->data
.mode
== nir_var_shader_out
) {
1146 var
->data
.location
+= VARYING_SLOT_VAR0
;
1149 switch (var
->data
.mode
) {
1150 case nir_var_shader_in
:
1151 exec_list_push_tail(&b
->shader
->inputs
, &var
->node
);
1153 case nir_var_shader_out
:
1154 exec_list_push_tail(&b
->shader
->outputs
, &var
->node
);
1156 case nir_var_global
:
1157 exec_list_push_tail(&b
->shader
->globals
, &var
->node
);
1160 exec_list_push_tail(&b
->impl
->locals
, &var
->node
);
1162 case nir_var_uniform
:
1163 exec_list_push_tail(&b
->shader
->uniforms
, &var
->node
);
1165 case nir_var_system_value
:
1166 exec_list_push_tail(&b
->shader
->system_values
, &var
->node
);
1172 case SpvOpAccessChain
:
1173 case SpvOpInBoundsAccessChain
: {
1174 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_deref
);
1175 nir_deref_var
*base
= vtn_value(b
, w
[3], vtn_value_type_deref
)->deref
;
1176 val
->deref
= nir_deref_as_var(nir_copy_deref(b
, &base
->deref
));
1177 val
->deref_type
= vtn_value(b
, w
[3], vtn_value_type_deref
)->deref_type
;
1179 nir_deref
*tail
= &val
->deref
->deref
;
1183 for (unsigned i
= 0; i
< count
- 4; i
++) {
1184 assert(w
[i
+ 4] < b
->value_id_bound
);
1185 struct vtn_value
*idx_val
= &b
->values
[w
[i
+ 4]];
1187 enum glsl_base_type base_type
= glsl_get_base_type(tail
->type
);
1188 switch (base_type
) {
1189 case GLSL_TYPE_UINT
:
1191 case GLSL_TYPE_FLOAT
:
1192 case GLSL_TYPE_DOUBLE
:
1193 case GLSL_TYPE_BOOL
:
1194 case GLSL_TYPE_ARRAY
: {
1195 nir_deref_array
*deref_arr
= nir_deref_array_create(b
);
1196 if (base_type
== GLSL_TYPE_ARRAY
||
1197 glsl_type_is_matrix(tail
->type
)) {
1198 val
->deref_type
= val
->deref_type
->array_element
;
1200 assert(glsl_type_is_vector(tail
->type
));
1201 val
->deref_type
= ralloc(b
, struct vtn_type
);
1202 val
->deref_type
->type
= glsl_scalar_type(base_type
);
1205 deref_arr
->deref
.type
= val
->deref_type
->type
;
1207 if (idx_val
->value_type
== vtn_value_type_constant
) {
1208 unsigned idx
= idx_val
->constant
->value
.u
[0];
1209 deref_arr
->deref_array_type
= nir_deref_array_type_direct
;
1210 deref_arr
->base_offset
= idx
;
1212 assert(idx_val
->value_type
== vtn_value_type_ssa
);
1213 deref_arr
->deref_array_type
= nir_deref_array_type_indirect
;
1214 deref_arr
->base_offset
= 0;
1215 deref_arr
->indirect
=
1216 nir_src_for_ssa(vtn_ssa_value(b
, w
[1])->def
);
1218 tail
->child
= &deref_arr
->deref
;
1222 case GLSL_TYPE_STRUCT
: {
1223 assert(idx_val
->value_type
== vtn_value_type_constant
);
1224 unsigned idx
= idx_val
->constant
->value
.u
[0];
1225 val
->deref_type
= val
->deref_type
->members
[idx
];
1226 nir_deref_struct
*deref_struct
= nir_deref_struct_create(b
, idx
);
1227 deref_struct
->deref
.type
= val
->deref_type
->type
;
1228 tail
->child
= &deref_struct
->deref
;
1232 unreachable("Invalid type for deref");
1239 case SpvOpCopyMemory
: {
1240 nir_deref_var
*dest
= vtn_value(b
, w
[1], vtn_value_type_deref
)->deref
;
1241 nir_deref_var
*src
= vtn_value(b
, w
[2], vtn_value_type_deref
)->deref
;
1242 struct vtn_type
*type
=
1243 vtn_value(b
, w
[1], vtn_value_type_deref
)->deref_type
;
1245 vtn_variable_copy(b
, src
, dest
, type
);
1250 nir_deref_var
*src
= vtn_value(b
, w
[3], vtn_value_type_deref
)->deref
;
1251 struct vtn_type
*src_type
=
1252 vtn_value(b
, w
[3], vtn_value_type_deref
)->deref_type
;
1254 if (glsl_get_base_type(src_type
->type
) == GLSL_TYPE_SAMPLER
) {
1255 vtn_push_value(b
, w
[2], vtn_value_type_deref
)->deref
= src
;
1259 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1260 val
->ssa
= vtn_variable_load(b
, src
, src_type
);
1265 nir_deref_var
*dest
= vtn_value(b
, w
[1], vtn_value_type_deref
)->deref
;
1266 struct vtn_type
*dest_type
=
1267 vtn_value(b
, w
[1], vtn_value_type_deref
)->deref_type
;
1268 struct vtn_ssa_value
*src
= vtn_ssa_value(b
, w
[2]);
1269 vtn_variable_store(b
, src
, dest
, dest_type
);
1273 case SpvOpVariableArray
:
1274 case SpvOpCopyMemorySized
:
1275 case SpvOpArrayLength
:
1276 case SpvOpImagePointer
:
1278 unreachable("Unhandled opcode");
1283 vtn_handle_function_call(struct vtn_builder
*b
, SpvOp opcode
,
1284 const uint32_t *w
, unsigned count
)
1286 unreachable("Unhandled opcode");
1289 static struct vtn_ssa_value
*
1290 vtn_create_ssa_value(struct vtn_builder
*b
, const struct glsl_type
*type
)
1292 struct vtn_ssa_value
*val
= rzalloc(b
, struct vtn_ssa_value
);
1295 if (!glsl_type_is_vector_or_scalar(type
)) {
1296 unsigned elems
= glsl_get_length(type
);
1297 val
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, elems
);
1298 for (unsigned i
= 0; i
< elems
; i
++) {
1299 const struct glsl_type
*child_type
;
1301 switch (glsl_get_base_type(type
)) {
1303 case GLSL_TYPE_UINT
:
1304 case GLSL_TYPE_BOOL
:
1305 case GLSL_TYPE_FLOAT
:
1306 case GLSL_TYPE_DOUBLE
:
1307 child_type
= glsl_get_column_type(type
);
1309 case GLSL_TYPE_ARRAY
:
1310 child_type
= glsl_get_array_element(type
);
1312 case GLSL_TYPE_STRUCT
:
1313 child_type
= glsl_get_struct_field(type
, i
);
1316 unreachable("unkown base type");
1319 val
->elems
[i
] = vtn_create_ssa_value(b
, child_type
);
1327 vtn_tex_src(struct vtn_builder
*b
, unsigned index
, nir_tex_src_type type
)
1330 src
.src
= nir_src_for_ssa(vtn_value(b
, index
, vtn_value_type_ssa
)->ssa
->def
);
1331 src
.src_type
= type
;
1336 vtn_handle_texture(struct vtn_builder
*b
, SpvOp opcode
,
1337 const uint32_t *w
, unsigned count
)
1339 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1340 nir_deref_var
*sampler
= vtn_value(b
, w
[3], vtn_value_type_deref
)->deref
;
1342 nir_tex_src srcs
[8]; /* 8 should be enough */
1343 nir_tex_src
*p
= srcs
;
1345 unsigned coord_components
= 0;
1347 case SpvOpTextureSample
:
1348 case SpvOpTextureSampleDref
:
1349 case SpvOpTextureSampleLod
:
1350 case SpvOpTextureSampleProj
:
1351 case SpvOpTextureSampleGrad
:
1352 case SpvOpTextureSampleOffset
:
1353 case SpvOpTextureSampleProjLod
:
1354 case SpvOpTextureSampleProjGrad
:
1355 case SpvOpTextureSampleLodOffset
:
1356 case SpvOpTextureSampleProjOffset
:
1357 case SpvOpTextureSampleGradOffset
:
1358 case SpvOpTextureSampleProjLodOffset
:
1359 case SpvOpTextureSampleProjGradOffset
:
1360 case SpvOpTextureFetchTexelLod
:
1361 case SpvOpTextureFetchTexelOffset
:
1362 case SpvOpTextureFetchSample
:
1363 case SpvOpTextureFetchTexel
:
1364 case SpvOpTextureGather
:
1365 case SpvOpTextureGatherOffset
:
1366 case SpvOpTextureGatherOffsets
:
1367 case SpvOpTextureQueryLod
: {
1368 /* All these types have the coordinate as their first real argument */
1369 struct vtn_ssa_value
*coord
= vtn_ssa_value(b
, w
[4]);
1370 coord_components
= glsl_get_vector_elements(coord
->type
);
1371 p
->src
= nir_src_for_ssa(coord
->def
);
1372 p
->src_type
= nir_tex_src_coord
;
1383 case SpvOpTextureSample
:
1384 texop
= nir_texop_tex
;
1387 texop
= nir_texop_txb
;
1388 *p
++ = vtn_tex_src(b
, w
[5], nir_tex_src_bias
);
1392 case SpvOpTextureSampleDref
:
1393 case SpvOpTextureSampleLod
:
1394 case SpvOpTextureSampleProj
:
1395 case SpvOpTextureSampleGrad
:
1396 case SpvOpTextureSampleOffset
:
1397 case SpvOpTextureSampleProjLod
:
1398 case SpvOpTextureSampleProjGrad
:
1399 case SpvOpTextureSampleLodOffset
:
1400 case SpvOpTextureSampleProjOffset
:
1401 case SpvOpTextureSampleGradOffset
:
1402 case SpvOpTextureSampleProjLodOffset
:
1403 case SpvOpTextureSampleProjGradOffset
:
1404 case SpvOpTextureFetchTexelLod
:
1405 case SpvOpTextureFetchTexelOffset
:
1406 case SpvOpTextureFetchSample
:
1407 case SpvOpTextureFetchTexel
:
1408 case SpvOpTextureGather
:
1409 case SpvOpTextureGatherOffset
:
1410 case SpvOpTextureGatherOffsets
:
1411 case SpvOpTextureQuerySizeLod
:
1412 case SpvOpTextureQuerySize
:
1413 case SpvOpTextureQueryLod
:
1414 case SpvOpTextureQueryLevels
:
1415 case SpvOpTextureQuerySamples
:
1417 unreachable("Unhandled opcode");
1420 nir_tex_instr
*instr
= nir_tex_instr_create(b
->shader
, p
- srcs
);
1422 const struct glsl_type
*sampler_type
= nir_deref_tail(&sampler
->deref
)->type
;
1423 instr
->sampler_dim
= glsl_get_sampler_dim(sampler_type
);
1425 switch (glsl_get_sampler_result_type(sampler_type
)) {
1426 case GLSL_TYPE_FLOAT
: instr
->dest_type
= nir_type_float
; break;
1427 case GLSL_TYPE_INT
: instr
->dest_type
= nir_type_int
; break;
1428 case GLSL_TYPE_UINT
: instr
->dest_type
= nir_type_unsigned
; break;
1429 case GLSL_TYPE_BOOL
: instr
->dest_type
= nir_type_bool
; break;
1431 unreachable("Invalid base type for sampler result");
1435 memcpy(instr
->src
, srcs
, instr
->num_srcs
* sizeof(*instr
->src
));
1436 instr
->coord_components
= coord_components
;
1437 instr
->is_array
= glsl_sampler_type_is_array(sampler_type
);
1438 instr
->is_shadow
= glsl_sampler_type_is_shadow(sampler_type
);
1440 instr
->sampler
= nir_deref_as_var(nir_copy_deref(instr
, &sampler
->deref
));
1442 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
, 4, NULL
);
1443 val
->ssa
= vtn_create_ssa_value(b
, glsl_vector_type(GLSL_TYPE_FLOAT
, 4));
1444 val
->ssa
->def
= &instr
->dest
.ssa
;
1446 nir_builder_instr_insert(&b
->nb
, &instr
->instr
);
1450 static nir_alu_instr
*
1451 create_vec(void *mem_ctx
, unsigned num_components
)
1454 switch (num_components
) {
1455 case 1: op
= nir_op_fmov
; break;
1456 case 2: op
= nir_op_vec2
; break;
1457 case 3: op
= nir_op_vec3
; break;
1458 case 4: op
= nir_op_vec4
; break;
1459 default: unreachable("bad vector size");
1462 nir_alu_instr
*vec
= nir_alu_instr_create(mem_ctx
, op
);
1463 nir_ssa_dest_init(&vec
->instr
, &vec
->dest
.dest
, num_components
, NULL
);
1464 vec
->dest
.write_mask
= (1 << num_components
) - 1;
1469 static struct vtn_ssa_value
*
1470 vtn_transpose(struct vtn_builder
*b
, struct vtn_ssa_value
*src
)
1472 if (src
->transposed
)
1473 return src
->transposed
;
1475 struct vtn_ssa_value
*dest
=
1476 vtn_create_ssa_value(b
, glsl_transposed_type(src
->type
));
1478 for (unsigned i
= 0; i
< glsl_get_matrix_columns(dest
->type
); i
++) {
1479 nir_alu_instr
*vec
= create_vec(b
, glsl_get_matrix_columns(src
->type
));
1480 if (glsl_type_is_vector_or_scalar(src
->type
)) {
1481 vec
->src
[0].src
= nir_src_for_ssa(src
->def
);
1482 vec
->src
[0].swizzle
[0] = i
;
1484 for (unsigned j
= 0; j
< glsl_get_matrix_columns(src
->type
); j
++) {
1485 vec
->src
[j
].src
= nir_src_for_ssa(src
->elems
[j
]->def
);
1486 vec
->src
[j
].swizzle
[0] = i
;
1489 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
1490 dest
->elems
[i
]->def
= &vec
->dest
.dest
.ssa
;
1493 dest
->transposed
= src
;
1499 * Normally, column vectors in SPIR-V correspond to a single NIR SSA
1500 * definition. But for matrix multiplies, we want to do one routine for
1501 * multiplying a matrix by a matrix and then pretend that vectors are matrices
1502 * with one column. So we "wrap" these things, and unwrap the result before we
1506 static struct vtn_ssa_value
*
1507 vtn_wrap_matrix(struct vtn_builder
*b
, struct vtn_ssa_value
*val
)
1512 if (glsl_type_is_matrix(val
->type
))
1515 struct vtn_ssa_value
*dest
= rzalloc(b
, struct vtn_ssa_value
);
1516 dest
->type
= val
->type
;
1517 dest
->elems
= ralloc_array(b
, struct vtn_ssa_value
*, 1);
1518 dest
->elems
[0] = val
;
1523 static struct vtn_ssa_value
*
1524 vtn_unwrap_matrix(struct vtn_ssa_value
*val
)
1526 if (glsl_type_is_matrix(val
->type
))
1529 return val
->elems
[0];
1532 static struct vtn_ssa_value
*
1533 vtn_matrix_multiply(struct vtn_builder
*b
,
1534 struct vtn_ssa_value
*_src0
, struct vtn_ssa_value
*_src1
)
1537 struct vtn_ssa_value
*src0
= vtn_wrap_matrix(b
, _src0
);
1538 struct vtn_ssa_value
*src1
= vtn_wrap_matrix(b
, _src1
);
1539 struct vtn_ssa_value
*src0_transpose
= vtn_wrap_matrix(b
, _src0
->transposed
);
1540 struct vtn_ssa_value
*src1_transpose
= vtn_wrap_matrix(b
, _src1
->transposed
);
1542 unsigned src0_rows
= glsl_get_vector_elements(src0
->type
);
1543 unsigned src0_columns
= glsl_get_matrix_columns(src0
->type
);
1544 unsigned src1_columns
= glsl_get_matrix_columns(src1
->type
);
1546 struct vtn_ssa_value
*dest
=
1547 vtn_create_ssa_value(b
, glsl_matrix_type(glsl_get_base_type(src0
->type
),
1548 src0_rows
, src1_columns
));
1550 dest
= vtn_wrap_matrix(b
, dest
);
1552 bool transpose_result
= false;
1553 if (src0_transpose
&& src1_transpose
) {
1554 /* transpose(A) * transpose(B) = transpose(B * A) */
1555 src1
= src0_transpose
;
1556 src0
= src1_transpose
;
1557 src0_transpose
= NULL
;
1558 src1_transpose
= NULL
;
1559 transpose_result
= true;
1562 if (src0_transpose
&& !src1_transpose
&&
1563 glsl_get_base_type(src0
->type
) == GLSL_TYPE_FLOAT
) {
1564 /* We already have the rows of src0 and the columns of src1 available,
1565 * so we can just take the dot product of each row with each column to
1569 for (unsigned i
= 0; i
< src1_columns
; i
++) {
1570 nir_alu_instr
*vec
= create_vec(b
, src0_rows
);
1571 for (unsigned j
= 0; j
< src0_rows
; j
++) {
1573 nir_src_for_ssa(nir_fdot(&b
->nb
, src0_transpose
->elems
[j
]->def
,
1574 src1
->elems
[i
]->def
));
1577 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
1578 dest
->elems
[i
]->def
= &vec
->dest
.dest
.ssa
;
1581 /* We don't handle the case where src1 is transposed but not src0, since
1582 * the general case only uses individual components of src1 so the
1583 * optimizer should chew through the transpose we emitted for src1.
1586 for (unsigned i
= 0; i
< src1_columns
; i
++) {
1587 /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
1588 dest
->elems
[i
]->def
=
1589 nir_fmul(&b
->nb
, src0
->elems
[0]->def
,
1590 vtn_vector_extract(b
, src1
->elems
[i
]->def
, 0));
1591 for (unsigned j
= 1; j
< src0_columns
; j
++) {
1592 dest
->elems
[i
]->def
=
1593 nir_fadd(&b
->nb
, dest
->elems
[i
]->def
,
1594 nir_fmul(&b
->nb
, src0
->elems
[j
]->def
,
1595 vtn_vector_extract(b
,
1596 src1
->elems
[i
]->def
, j
)));
1601 dest
= vtn_unwrap_matrix(dest
);
1603 if (transpose_result
)
1604 dest
= vtn_transpose(b
, dest
);
1609 static struct vtn_ssa_value
*
1610 vtn_mat_times_scalar(struct vtn_builder
*b
,
1611 struct vtn_ssa_value
*mat
,
1612 nir_ssa_def
*scalar
)
1614 struct vtn_ssa_value
*dest
= vtn_create_ssa_value(b
, mat
->type
);
1615 for (unsigned i
= 0; i
< glsl_get_matrix_columns(mat
->type
); i
++) {
1616 if (glsl_get_base_type(mat
->type
) == GLSL_TYPE_FLOAT
)
1617 dest
->elems
[i
]->def
= nir_fmul(&b
->nb
, mat
->elems
[i
]->def
, scalar
);
1619 dest
->elems
[i
]->def
= nir_imul(&b
->nb
, mat
->elems
[i
]->def
, scalar
);
1626 vtn_handle_matrix_alu(struct vtn_builder
*b
, SpvOp opcode
,
1627 const uint32_t *w
, unsigned count
)
1629 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1632 case SpvOpTranspose
: {
1633 struct vtn_ssa_value
*src
= vtn_ssa_value(b
, w
[3]);
1634 val
->ssa
= vtn_transpose(b
, src
);
1638 case SpvOpOuterProduct
: {
1639 struct vtn_ssa_value
*src0
= vtn_ssa_value(b
, w
[3]);
1640 struct vtn_ssa_value
*src1
= vtn_ssa_value(b
, w
[4]);
1642 val
->ssa
= vtn_matrix_multiply(b
, src0
, vtn_transpose(b
, src1
));
1646 case SpvOpMatrixTimesScalar
: {
1647 struct vtn_ssa_value
*mat
= vtn_ssa_value(b
, w
[3]);
1648 struct vtn_ssa_value
*scalar
= vtn_ssa_value(b
, w
[4]);
1650 if (mat
->transposed
) {
1651 val
->ssa
= vtn_transpose(b
, vtn_mat_times_scalar(b
, mat
->transposed
,
1654 val
->ssa
= vtn_mat_times_scalar(b
, mat
, scalar
->def
);
1659 case SpvOpVectorTimesMatrix
:
1660 case SpvOpMatrixTimesVector
:
1661 case SpvOpMatrixTimesMatrix
: {
1662 struct vtn_ssa_value
*src0
= vtn_ssa_value(b
, w
[3]);
1663 struct vtn_ssa_value
*src1
= vtn_ssa_value(b
, w
[4]);
1665 val
->ssa
= vtn_matrix_multiply(b
, src0
, src1
);
1669 default: unreachable("unknown matrix opcode");
1674 vtn_handle_alu(struct vtn_builder
*b
, SpvOp opcode
,
1675 const uint32_t *w
, unsigned count
)
1677 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
1678 const struct glsl_type
*type
=
1679 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
1680 val
->ssa
= vtn_create_ssa_value(b
, type
);
1682 /* Collect the various SSA sources */
1683 unsigned num_inputs
= count
- 3;
1684 nir_ssa_def
*src
[4];
1685 for (unsigned i
= 0; i
< num_inputs
; i
++)
1686 src
[i
] = vtn_ssa_value(b
, w
[i
+ 3])->def
;
1688 /* Indicates that the first two arguments should be swapped. This is
1689 * used for implementing greater-than and less-than-or-equal.
1695 /* Basic ALU operations */
1696 case SpvOpSNegate
: op
= nir_op_ineg
; break;
1697 case SpvOpFNegate
: op
= nir_op_fneg
; break;
1698 case SpvOpNot
: op
= nir_op_inot
; break;
1701 switch (src
[0]->num_components
) {
1702 case 1: op
= nir_op_imov
; break;
1703 case 2: op
= nir_op_bany2
; break;
1704 case 3: op
= nir_op_bany3
; break;
1705 case 4: op
= nir_op_bany4
; break;
1710 switch (src
[0]->num_components
) {
1711 case 1: op
= nir_op_imov
; break;
1712 case 2: op
= nir_op_ball2
; break;
1713 case 3: op
= nir_op_ball3
; break;
1714 case 4: op
= nir_op_ball4
; break;
1718 case SpvOpIAdd
: op
= nir_op_iadd
; break;
1719 case SpvOpFAdd
: op
= nir_op_fadd
; break;
1720 case SpvOpISub
: op
= nir_op_isub
; break;
1721 case SpvOpFSub
: op
= nir_op_fsub
; break;
1722 case SpvOpIMul
: op
= nir_op_imul
; break;
1723 case SpvOpFMul
: op
= nir_op_fmul
; break;
1724 case SpvOpUDiv
: op
= nir_op_udiv
; break;
1725 case SpvOpSDiv
: op
= nir_op_idiv
; break;
1726 case SpvOpFDiv
: op
= nir_op_fdiv
; break;
1727 case SpvOpUMod
: op
= nir_op_umod
; break;
1728 case SpvOpSMod
: op
= nir_op_umod
; break; /* FIXME? */
1729 case SpvOpFMod
: op
= nir_op_fmod
; break;
1732 assert(src
[0]->num_components
== src
[1]->num_components
);
1733 switch (src
[0]->num_components
) {
1734 case 1: op
= nir_op_fmul
; break;
1735 case 2: op
= nir_op_fdot2
; break;
1736 case 3: op
= nir_op_fdot3
; break;
1737 case 4: op
= nir_op_fdot4
; break;
1741 case SpvOpShiftRightLogical
: op
= nir_op_ushr
; break;
1742 case SpvOpShiftRightArithmetic
: op
= nir_op_ishr
; break;
1743 case SpvOpShiftLeftLogical
: op
= nir_op_ishl
; break;
1744 case SpvOpLogicalOr
: op
= nir_op_ior
; break;
1745 case SpvOpLogicalXor
: op
= nir_op_ixor
; break;
1746 case SpvOpLogicalAnd
: op
= nir_op_iand
; break;
1747 case SpvOpBitwiseOr
: op
= nir_op_ior
; break;
1748 case SpvOpBitwiseXor
: op
= nir_op_ixor
; break;
1749 case SpvOpBitwiseAnd
: op
= nir_op_iand
; break;
1750 case SpvOpSelect
: op
= nir_op_bcsel
; break;
1751 case SpvOpIEqual
: op
= nir_op_ieq
; break;
1753 /* Comparisons: (TODO: How do we want to handled ordered/unordered?) */
1754 case SpvOpFOrdEqual
: op
= nir_op_feq
; break;
1755 case SpvOpFUnordEqual
: op
= nir_op_feq
; break;
1756 case SpvOpINotEqual
: op
= nir_op_ine
; break;
1757 case SpvOpFOrdNotEqual
: op
= nir_op_fne
; break;
1758 case SpvOpFUnordNotEqual
: op
= nir_op_fne
; break;
1759 case SpvOpULessThan
: op
= nir_op_ult
; break;
1760 case SpvOpSLessThan
: op
= nir_op_ilt
; break;
1761 case SpvOpFOrdLessThan
: op
= nir_op_flt
; break;
1762 case SpvOpFUnordLessThan
: op
= nir_op_flt
; break;
1763 case SpvOpUGreaterThan
: op
= nir_op_ult
; swap
= true; break;
1764 case SpvOpSGreaterThan
: op
= nir_op_ilt
; swap
= true; break;
1765 case SpvOpFOrdGreaterThan
: op
= nir_op_flt
; swap
= true; break;
1766 case SpvOpFUnordGreaterThan
: op
= nir_op_flt
; swap
= true; break;
1767 case SpvOpULessThanEqual
: op
= nir_op_uge
; swap
= true; break;
1768 case SpvOpSLessThanEqual
: op
= nir_op_ige
; swap
= true; break;
1769 case SpvOpFOrdLessThanEqual
: op
= nir_op_fge
; swap
= true; break;
1770 case SpvOpFUnordLessThanEqual
: op
= nir_op_fge
; swap
= true; break;
1771 case SpvOpUGreaterThanEqual
: op
= nir_op_uge
; break;
1772 case SpvOpSGreaterThanEqual
: op
= nir_op_ige
; break;
1773 case SpvOpFOrdGreaterThanEqual
: op
= nir_op_fge
; break;
1774 case SpvOpFUnordGreaterThanEqual
:op
= nir_op_fge
; break;
1777 case SpvOpConvertFToU
: op
= nir_op_f2u
; break;
1778 case SpvOpConvertFToS
: op
= nir_op_f2i
; break;
1779 case SpvOpConvertSToF
: op
= nir_op_i2f
; break;
1780 case SpvOpConvertUToF
: op
= nir_op_u2f
; break;
1781 case SpvOpBitcast
: op
= nir_op_imov
; break;
1784 op
= nir_op_imov
; /* TODO: NIR is 32-bit only; these are no-ops. */
1791 case SpvOpDPdx
: op
= nir_op_fddx
; break;
1792 case SpvOpDPdy
: op
= nir_op_fddy
; break;
1793 case SpvOpDPdxFine
: op
= nir_op_fddx_fine
; break;
1794 case SpvOpDPdyFine
: op
= nir_op_fddy_fine
; break;
1795 case SpvOpDPdxCoarse
: op
= nir_op_fddx_coarse
; break;
1796 case SpvOpDPdyCoarse
: op
= nir_op_fddy_coarse
; break;
1798 val
->ssa
->def
= nir_fadd(&b
->nb
,
1799 nir_fabs(&b
->nb
, nir_fddx(&b
->nb
, src
[0])),
1800 nir_fabs(&b
->nb
, nir_fddx(&b
->nb
, src
[1])));
1802 case SpvOpFwidthFine
:
1803 val
->ssa
->def
= nir_fadd(&b
->nb
,
1804 nir_fabs(&b
->nb
, nir_fddx_fine(&b
->nb
, src
[0])),
1805 nir_fabs(&b
->nb
, nir_fddx_fine(&b
->nb
, src
[1])));
1807 case SpvOpFwidthCoarse
:
1808 val
->ssa
->def
= nir_fadd(&b
->nb
,
1809 nir_fabs(&b
->nb
, nir_fddx_coarse(&b
->nb
, src
[0])),
1810 nir_fabs(&b
->nb
, nir_fddx_coarse(&b
->nb
, src
[1])));
1813 case SpvOpVectorTimesScalar
:
1814 /* The builder will take care of splatting for us. */
1815 val
->ssa
->def
= nir_fmul(&b
->nb
, src
[0], src
[1]);
1820 unreachable("No NIR equivalent");
1826 case SpvOpSignBitSet
:
1827 case SpvOpLessOrGreater
:
1829 case SpvOpUnordered
:
1831 unreachable("Unhandled opcode");
1835 nir_ssa_def
*tmp
= src
[0];
1840 nir_alu_instr
*instr
= nir_alu_instr_create(b
->shader
, op
);
1841 nir_ssa_dest_init(&instr
->instr
, &instr
->dest
.dest
,
1842 glsl_get_vector_elements(type
), val
->name
);
1843 val
->ssa
->def
= &instr
->dest
.dest
.ssa
;
1845 for (unsigned i
= 0; i
< nir_op_infos
[op
].num_inputs
; i
++)
1846 instr
->src
[i
].src
= nir_src_for_ssa(src
[i
]);
1848 nir_builder_instr_insert(&b
->nb
, &instr
->instr
);
1851 static nir_ssa_def
*
1852 vtn_vector_extract(struct vtn_builder
*b
, nir_ssa_def
*src
, unsigned index
)
1854 unsigned swiz
[4] = { index
};
1855 return nir_swizzle(&b
->nb
, src
, swiz
, 1, true);
1859 static nir_ssa_def
*
1860 vtn_vector_insert(struct vtn_builder
*b
, nir_ssa_def
*src
, nir_ssa_def
*insert
,
1863 nir_alu_instr
*vec
= create_vec(b
->shader
, src
->num_components
);
1865 for (unsigned i
= 0; i
< src
->num_components
; i
++) {
1867 vec
->src
[i
].src
= nir_src_for_ssa(insert
);
1869 vec
->src
[i
].src
= nir_src_for_ssa(src
);
1870 vec
->src
[i
].swizzle
[0] = i
;
1874 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
1876 return &vec
->dest
.dest
.ssa
;
1879 static nir_ssa_def
*
1880 vtn_vector_extract_dynamic(struct vtn_builder
*b
, nir_ssa_def
*src
,
1883 nir_ssa_def
*dest
= vtn_vector_extract(b
, src
, 0);
1884 for (unsigned i
= 1; i
< src
->num_components
; i
++)
1885 dest
= nir_bcsel(&b
->nb
, nir_ieq(&b
->nb
, index
, nir_imm_int(&b
->nb
, i
)),
1886 vtn_vector_extract(b
, src
, i
), dest
);
1891 static nir_ssa_def
*
1892 vtn_vector_insert_dynamic(struct vtn_builder
*b
, nir_ssa_def
*src
,
1893 nir_ssa_def
*insert
, nir_ssa_def
*index
)
1895 nir_ssa_def
*dest
= vtn_vector_insert(b
, src
, insert
, 0);
1896 for (unsigned i
= 1; i
< src
->num_components
; i
++)
1897 dest
= nir_bcsel(&b
->nb
, nir_ieq(&b
->nb
, index
, nir_imm_int(&b
->nb
, i
)),
1898 vtn_vector_insert(b
, src
, insert
, i
), dest
);
1903 static nir_ssa_def
*
1904 vtn_vector_shuffle(struct vtn_builder
*b
, unsigned num_components
,
1905 nir_ssa_def
*src0
, nir_ssa_def
*src1
,
1906 const uint32_t *indices
)
1908 nir_alu_instr
*vec
= create_vec(b
->shader
, num_components
);
1910 nir_ssa_undef_instr
*undef
= nir_ssa_undef_instr_create(b
->shader
, 1);
1911 nir_builder_instr_insert(&b
->nb
, &undef
->instr
);
1913 for (unsigned i
= 0; i
< num_components
; i
++) {
1914 uint32_t index
= indices
[i
];
1915 if (index
== 0xffffffff) {
1916 vec
->src
[i
].src
= nir_src_for_ssa(&undef
->def
);
1917 } else if (index
< src0
->num_components
) {
1918 vec
->src
[i
].src
= nir_src_for_ssa(src0
);
1919 vec
->src
[i
].swizzle
[0] = index
;
1921 vec
->src
[i
].src
= nir_src_for_ssa(src1
);
1922 vec
->src
[i
].swizzle
[0] = index
- src0
->num_components
;
1926 nir_builder_instr_insert(&b
->nb
, &vec
->instr
);
1928 return &vec
->dest
.dest
.ssa
;
/*
 * Concatenates a number of vectors/scalars together to produce a vector.
 */
static nir_ssa_def *
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
                     unsigned num_srcs, nir_ssa_def **srcs)
{
   nir_alu_instr *vec = create_vec(b->shader, num_components);

   unsigned dest_idx = 0;
   for (unsigned i = 0; i < num_srcs; i++) {
      nir_ssa_def *src = srcs[i];
      for (unsigned j = 0; j < src->num_components; j++) {
         vec->src[dest_idx].src = nir_src_for_ssa(src);
         vec->src[dest_idx].swizzle[0] = j;
         dest_idx++;
      }
   }

   nir_builder_instr_insert(&b->nb, &vec->instr);

   return &vec->dest.dest.ssa;
}

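/* Recursively copies a vtn_ssa_value tree.  Vector/scalar leaves share the
 * underlying nir_ssa_def; aggregate levels get freshly allocated element
 * arrays.
 */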
static struct vtn_ssa_value *
vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
{
   struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
   dest->type = src->type;

   if (glsl_type_is_vector_or_scalar(src->type)) {
      dest->def = src->def;
   } else {
      unsigned elems = glsl_get_length(src->type);

      dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++)
         dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
   }

   return dest;
}

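/* Implements SpvOpCompositeInsert: copies the source composite, walks the
 * index chain to the affected element, and overwrites it with the value to
 * insert.
 */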
static struct vtn_ssa_value *
vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
                     struct vtn_ssa_value *insert, const uint32_t *indices,
                     unsigned num_indices)
{
   struct vtn_ssa_value *dest = vtn_composite_copy(b, src);

   struct vtn_ssa_value *cur = dest;
   unsigned i;
   for (i = 0; i < num_indices - 1; i++) {
      cur = cur->elems[indices[i]];
   }

   if (glsl_type_is_vector_or_scalar(cur->type)) {
      /* According to the SPIR-V spec, OpCompositeInsert may work down to
       * the component granularity.  In that case, the last index is the
       * component index at which to insert the scalar into the vector.
       */
      cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
   } else {
      cur->elems[indices[i]] = insert;
   }

   return dest;
}

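/* Implements SpvOpCompositeExtract by walking the index chain down the
 * composite, descending one aggregate level per index.
 */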
static struct vtn_ssa_value *
vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
                      const uint32_t *indices, unsigned num_indices)
{
   struct vtn_ssa_value *cur = src;
   for (unsigned i = 0; i < num_indices; i++) {
      if (glsl_type_is_vector_or_scalar(cur->type)) {
         assert(i == num_indices - 1);
         /* According to the SPIR-V spec, OpCompositeExtract may work down to
          * the component granularity.  The last index will be the index of
          * the component to extract from the vector.
          */
         struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
         ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
         ret->def = vtn_vector_extract(b, cur->def, indices[i]);
         return ret;
      }

      cur = cur->elems[indices[i]];
   }

   return cur;
}

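/* Dispatches the SPIR-V vector and composite opcodes to the helpers above
 * and stores the result on the destination SSA value.
 */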
static void
vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_create_ssa_value(b, type);

   switch (opcode) {
   case SpvOpVectorExtractDynamic:
      val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                 vtn_ssa_value(b, w[4])->def);
      break;

   case SpvOpVectorInsertDynamic:
      val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
                                                vtn_ssa_value(b, w[4])->def,
                                                vtn_ssa_value(b, w[5])->def);
      break;

   case SpvOpVectorShuffle:
      val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
                                         vtn_ssa_value(b, w[3])->def,
                                         vtn_ssa_value(b, w[4])->def,
                                         w + 5);
      break;

   case SpvOpCompositeConstruct: {
      unsigned elems = count - 3;
      if (glsl_type_is_vector_or_scalar(type)) {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
            vtn_vector_construct(b, glsl_get_vector_elements(type),
                                 elems, srcs);
      } else {
         val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
         for (unsigned i = 0; i < elems; i++)
            val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
      }
      break;
   }

   case SpvOpCompositeExtract:
      val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
                                       w + 4, count - 4);
      break;

   case SpvOpCompositeInsert:
      val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
                                      vtn_ssa_value(b, w[3]),
                                      w + 5, count - 5);
      break;

   case SpvOpCopyObject:
      val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
      break;

   default:
      unreachable("unknown composite operation");
   }
}

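/* Creates an empty nir_phi_instr for every vector/scalar leaf of the given
 * value; the sources are filled in later by the second phi pass.
 */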
static void
vtn_phi_node_init(struct vtn_builder *b, struct vtn_ssa_value *val)
{
   if (glsl_type_is_vector_or_scalar(val->type)) {
      nir_phi_instr *phi = nir_phi_instr_create(b->shader);
      nir_ssa_dest_init(&phi->instr, &phi->dest,
                        glsl_get_vector_elements(val->type), NULL);
      exec_list_make_empty(&phi->srcs);
      nir_builder_instr_insert(&b->nb, &phi->instr);
      val->def = &phi->dest.ssa;
   } else {
      unsigned elems = glsl_get_length(val->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_init(b, val->elems[i]);
   }
}

static struct vtn_ssa_value *
vtn_phi_node_create(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = vtn_create_ssa_value(b, type);
   vtn_phi_node_init(b, val);
   return val;
}

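/* First phi pass: when an OpPhi is encountered, only allocate the
 * source-less phi instructions for its result; the incoming values are
 * wired up in the second pass once every block has been emitted.
 */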
static void
vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->ssa = vtn_phi_node_create(b, type);
}

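/* Adds a (predecessor block, value) source pair to every phi instruction in
 * the (possibly aggregate) phi value.
 */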
static void
vtn_phi_node_add_src(struct vtn_ssa_value *phi, const nir_block *pred,
                     struct vtn_ssa_value *val)
{
   assert(phi->type == val->type);
   if (glsl_type_is_vector_or_scalar(phi->type)) {
      nir_phi_instr *phi_instr = nir_instr_as_phi(phi->def->parent_instr);
      nir_phi_src *src = ralloc(phi_instr, nir_phi_src);
      src->pred = (nir_block *) pred;
      src->src = nir_src_for_ssa(val->def);
      exec_list_push_tail(&phi_instr->srcs, &src->node);
   } else {
      unsigned elems = glsl_get_length(phi->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_phi_node_add_src(phi->elems[i], pred, val->elems[i]);
   }
}

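/* Finds the value an OpPhi takes when entered from the given NIR block.  If
 * the block matches one of the phi's literal predecessors, that operand is
 * returned directly; otherwise a new phi node is created in the block and
 * filled in recursively from its predecessors.
 */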
static struct vtn_ssa_value *
vtn_get_phi_node_src(struct vtn_builder *b, nir_block *block,
                     const struct glsl_type *type, const uint32_t *w,
                     unsigned count)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->block_table, block);
   if (entry) {
      struct vtn_block *spv_block = entry->data;
      for (unsigned off = 4; off < count; off += 2) {
         if (spv_block == vtn_value(b, w[off], vtn_value_type_block)->block) {
            return vtn_ssa_value(b, w[off - 1]);
         }
      }
   }

   nir_builder_insert_before_block(&b->nb, block);
   struct vtn_ssa_value *phi = vtn_phi_node_create(b, type);

   struct set_entry *entry2;
   set_foreach(block->predecessors, entry2) {
      nir_block *pred = (nir_block *) entry2->key;
      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return phi;
}

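/* Second phi pass over a function's instructions: now that every block has
 * been emitted, walk the OpPhi instructions again and hook up their sources.
 */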
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel) {
      b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
      return true;
   }

   if (opcode != SpvOpPhi)
      return true;

   struct vtn_ssa_value *phi = vtn_value(b, w[2], vtn_value_type_ssa)->ssa;

   struct set_entry *entry;
   set_foreach(b->block->block->predecessors, entry) {
      nir_block *pred = (nir_block *) entry->key;

      struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, phi->type, w,
                                                       count);
      vtn_phi_node_add_src(phi, pred, val);
   }

   return true;
}

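/* Handles one instruction from the module preamble (debug info, decorations,
 * types, constants, global variables).  Returns false on the first opcode
 * that is not part of the preamble so the caller can switch passes.
 */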
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpSourceExtension:
   case SpvOpCompileFlag:
   case SpvOpExtension:
      /* Unhandled, but these are for debug so that's ok. */
      break;

   case SpvOpExtInstImport:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpMemoryModel:
      assert(w[1] == SpvAddressingModelLogical);
      assert(w[2] == SpvMemoryModelGLSL450);
      break;

   case SpvOpEntryPoint:
      assert(b->entry_point == NULL);
      b->entry_point = &b->values[w[2]];
      b->execution_model = w[1];
      break;

   case SpvOpExecutionMode:
      unreachable("Execution modes not yet implemented");
      break;

   case SpvOpString:
      vtn_push_value(b, w[1], vtn_value_type_string)->str =
         vtn_string_literal(b, &w[2], count - 2);
      break;

   case SpvOpName:
      b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
      break;

   case SpvOpMemberName:
      break; /* Ignored for now */

   case SpvOpDecorationGroup:
   case SpvOpMemberDecorate:
   case SpvOpGroupDecorate:
   case SpvOpGroupMemberDecorate:
      vtn_handle_decoration(b, opcode, w, count);
      break;

   case SpvOpTypeFloat:
   case SpvOpTypeVector:
   case SpvOpTypeMatrix:
   case SpvOpTypeSampler:
   case SpvOpTypeArray:
   case SpvOpTypeRuntimeArray:
   case SpvOpTypeStruct:
   case SpvOpTypeOpaque:
   case SpvOpTypePointer:
   case SpvOpTypeFunction:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
      vtn_handle_type(b, opcode, w, count);
      break;

   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpConstantComposite:
   case SpvOpConstantSampler:
   case SpvOpConstantNullPointer:
   case SpvOpConstantNullObject:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse:
   case SpvOpSpecConstant:
   case SpvOpSpecConstantComposite:
      vtn_handle_constant(b, opcode, w, count);
      break;

   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;

   default:
      return false; /* End of preamble */
   }

   return true;
}

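/* First CFG pass: records functions, blocks, branches, and merge
 * instructions so that vtn_walk_blocks can later reconstruct structured
 * control flow without re-parsing the whole body.
 */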
static bool
vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
                                      const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      const struct glsl_type *func_type =
         vtn_value(b, w[4], vtn_value_type_type)->type->type;

      assert(glsl_get_function_return_type(func_type) == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      nir_function_overload *overload = nir_function_overload_create(func);
      overload->num_params = glsl_get_length(func_type);
      overload->params = ralloc_array(overload, nir_parameter,
                                      overload->num_params);
      for (unsigned i = 0; i < overload->num_params; i++) {
         const struct glsl_function_param *param =
            glsl_get_function_param(func_type, i);
         overload->params[i].type = param->type;
         if (param->in) {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_inout;
            } else {
               overload->params[i].param_type = nir_parameter_in;
            }
         } else {
            if (param->out) {
               overload->params[i].param_type = nir_parameter_out;
            } else {
               assert(!"Parameter is neither in nor out");
            }
         }
      }

      b->func->overload = overload;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter:
      break; /* Does nothing */

   case SpvOpLabel: {
      assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;
   }

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      b->block->branch = w;
      b->block = NULL;
      break;

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      assert(b->block && b->block->merge_op == SpvOpNop);
      b->block->merge_op = opcode;
      b->block->merge_block_id = w[1];
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}

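/* Handles a single instruction inside a function body, dispatching to the
 * per-category handlers (variables, textures, ALU, matrices, composites,
 * phis, ...).
 */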
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpLabel: {
      struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
      assert(block->block == NULL);

      struct exec_node *list_tail = exec_list_get_tail(b->nb.cf_node_list);
      nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node);
      assert(tail_node->type == nir_cf_node_block);
      block->block = nir_cf_node_as_block(tail_node);
      break;
   }

   case SpvOpLoopMerge:
   case SpvOpSelectionMerge:
      /* This is handled by cfg pre-pass and walk_blocks */
      break;

   case SpvOpUndef:
      vtn_push_value(b, w[2], vtn_value_type_undef);
      break;

   case SpvOpExtInst:
      vtn_handle_extension(b, opcode, w, count);
      break;

   case SpvOpVariableArray:
   case SpvOpCopyMemory:
   case SpvOpCopyMemorySized:
   case SpvOpAccessChain:
   case SpvOpInBoundsAccessChain:
   case SpvOpArrayLength:
   case SpvOpImagePointer:
      vtn_handle_variables(b, opcode, w, count);
      break;

   case SpvOpFunctionCall:
      vtn_handle_function_call(b, opcode, w, count);
      break;

   case SpvOpTextureSample:
   case SpvOpTextureSampleDref:
   case SpvOpTextureSampleLod:
   case SpvOpTextureSampleProj:
   case SpvOpTextureSampleGrad:
   case SpvOpTextureSampleOffset:
   case SpvOpTextureSampleProjLod:
   case SpvOpTextureSampleProjGrad:
   case SpvOpTextureSampleLodOffset:
   case SpvOpTextureSampleProjOffset:
   case SpvOpTextureSampleGradOffset:
   case SpvOpTextureSampleProjLodOffset:
   case SpvOpTextureSampleProjGradOffset:
   case SpvOpTextureFetchTexelLod:
   case SpvOpTextureFetchTexelOffset:
   case SpvOpTextureFetchSample:
   case SpvOpTextureFetchTexel:
   case SpvOpTextureGather:
   case SpvOpTextureGatherOffset:
   case SpvOpTextureGatherOffsets:
   case SpvOpTextureQuerySizeLod:
   case SpvOpTextureQuerySize:
   case SpvOpTextureQueryLod:
   case SpvOpTextureQueryLevels:
   case SpvOpTextureQuerySamples:
      vtn_handle_texture(b, opcode, w, count);
      break;

   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpConvertPtrToU:
   case SpvOpConvertUToPtr:
   case SpvOpPtrCastToGeneric:
   case SpvOpGenericCastToPtr:
   case SpvOpSignBitSet:
   case SpvOpLessOrGreater:
   case SpvOpUnordered:
   case SpvOpVectorTimesScalar:
   case SpvOpShiftRightLogical:
   case SpvOpShiftRightArithmetic:
   case SpvOpShiftLeftLogical:
   case SpvOpLogicalOr:
   case SpvOpLogicalXor:
   case SpvOpLogicalAnd:
   case SpvOpBitwiseOr:
   case SpvOpBitwiseXor:
   case SpvOpBitwiseAnd:
   case SpvOpFOrdEqual:
   case SpvOpFUnordEqual:
   case SpvOpINotEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpULessThan:
   case SpvOpSLessThan:
   case SpvOpFOrdLessThan:
   case SpvOpFUnordLessThan:
   case SpvOpUGreaterThan:
   case SpvOpSGreaterThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpULessThanEqual:
   case SpvOpSLessThanEqual:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFUnordLessThanEqual:
   case SpvOpUGreaterThanEqual:
   case SpvOpSGreaterThanEqual:
   case SpvOpFOrdGreaterThanEqual:
   case SpvOpFUnordGreaterThanEqual:
   case SpvOpFwidthFine:
   case SpvOpDPdxCoarse:
   case SpvOpDPdyCoarse:
   case SpvOpFwidthCoarse:
      vtn_handle_alu(b, opcode, w, count);
      break;

   case SpvOpTranspose:
   case SpvOpOuterProduct:
   case SpvOpMatrixTimesScalar:
   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      vtn_handle_matrix_alu(b, opcode, w, count);
      break;

   case SpvOpVectorExtractDynamic:
   case SpvOpVectorInsertDynamic:
   case SpvOpVectorShuffle:
   case SpvOpCompositeConstruct:
   case SpvOpCompositeExtract:
   case SpvOpCompositeInsert:
   case SpvOpCopyObject:
      vtn_handle_composite(b, opcode, w, count);
      break;

   case SpvOpPhi:
      vtn_handle_phi_first_pass(b, w);
      break;

   default:
      unreachable("Unhandled opcode");
   }

   return true;
}

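/* Walks the SPIR-V CFG starting at `start` and emits structured NIR control
 * flow.  A block carrying an OpLoopMerge opens a nir_loop, conditional
 * branches become nir_if nodes, and branches to break_block/cont_block turn
 * into NIR break/continue jumps.  Walking stops at end_block (NULL for loops
 * and whole functions).
 */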
static void
vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
                struct vtn_block *break_block, struct vtn_block *cont_block,
                struct vtn_block *end_block)
{
   struct vtn_block *block = start;
   while (block != end_block) {
      if (block->merge_op == SpvOpLoopMerge) {
         /* This is the jump into a loop. */
         struct vtn_block *new_cont_block = block;
         struct vtn_block *new_break_block =
            vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

         nir_loop *loop = nir_loop_create(b->shader);
         nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node);

         struct exec_list *old_list = b->nb.cf_node_list;

         /* Reset the merge_op to prevent infinite recursion */
         block->merge_op = SpvOpNop;

         nir_builder_insert_after_cf_list(&b->nb, &loop->body);
         vtn_walk_blocks(b, block, new_break_block, new_cont_block, NULL);

         nir_builder_insert_after_cf_list(&b->nb, old_list);
         block = new_break_block;
         continue;
      }

      const uint32_t *w = block->branch;
      SpvOp branch_op = w[0] & SpvOpCodeMask;

      vtn_foreach_instruction(b, block->label, block->branch,
                              vtn_handle_body_instruction);

      nir_cf_node *cur_cf_node =
         exec_node_data(nir_cf_node, exec_list_get_tail(b->nb.cf_node_list),
                        node);
      nir_block *cur_block = nir_cf_node_as_block(cur_cf_node);
      _mesa_hash_table_insert(b->block_table, cur_block, block);

      switch (branch_op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, w[1], vtn_value_type_block)->block;

         if (branch_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (branch_block == end_block) {
            /* We're branching to the merge block of an if; since for loops
             * and functions end_block == NULL, we're done here.
             */
            return;
         } else {
            /* We're branching to another block, and according to the rules,
             * we can only branch to another block with one predecessor (so
             * we're the only one jumping to it) so we can just process it
             * directly.
             */
            block = branch_block;
            continue;
         }
      }

      case SpvOpBranchConditional: {
         /* Gather up the branch blocks */
         struct vtn_block *then_block =
            vtn_value(b, w[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, w[3], vtn_value_type_block)->block;

         nir_if *if_stmt = nir_if_create(b->shader);
         if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
         nir_cf_node_insert_end(b->nb.cf_node_list, &if_stmt->cf_node);

         if (then_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else if (then_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->then_list,
                                           &jump->instr);
            block = else_block;
         } else if (else_block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_instr_insert_after_cf_list(&if_stmt->else_list,
                                           &jump->instr);
            block = then_block;
         } else {
            /* According to the rules we're branching to two blocks that don't
             * have any other predecessors, so we can handle this as a
             * regular if.
             */
            assert(block->merge_op == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;

            struct exec_list *old_list = b->nb.cf_node_list;

            nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list);
            vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);

            nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list);
            vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);

            nir_builder_insert_after_cf_list(&b->nb, old_list);
            block = merge_block;

            continue;
         }

         /* If we got here then we inserted a predicated break or continue
          * above and we need to handle the other case.  We already set
          * `block` above to indicate what block to visit after the
          * if/then/else.
          */

         /* It's possible that the other branch is also a break/continue.
          * If it is, we handle that here.
          */
         if (block == break_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_break);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         } else if (block == cont_block) {
            nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                         nir_jump_continue);
            nir_builder_instr_insert(&b->nb, &jump->instr);

            return;
         }

         /* If we got here then there was a predicated break/continue but
          * the other half of the if has stuff in it.  `block` was already
          * set above so there is nothing left for us to do.
          */
         continue;
      }

      case SpvOpReturn: {
         nir_jump_instr *jump = nir_jump_instr_create(b->shader,
                                                      nir_jump_return);
         nir_builder_instr_insert(&b->nb, &jump->instr);
         return;
      }

      case SpvOpKill: {
         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
         nir_builder_instr_insert(&b->nb, &discard->instr);
         return;
      }

      case SpvOpReturnValue:
      case SpvOpUnreachable:
      default:
         unreachable("Unhandled opcode");
      }
   }
}

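/* Top-level entry point: validates the SPIR-V header, runs the preamble and
 * CFG pre-passes, then emits NIR for each implemented function.
 */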
nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
             const nir_shader_compiler_options *options)
{
   const uint32_t *word_end = words + word_count;

   /* Handle the SPIR-V header (first 5 dwords) */
   assert(word_count > 5);

   assert(words[0] == SpvMagicNumber);
   assert(words[1] == 99);
   /* words[2] == generator magic */
   unsigned value_id_bound = words[3];
   assert(words[4] == 0);

   words += 5;

   nir_shader *shader = nir_shader_create(NULL, options);

   /* Initialize the vtn_builder object */
   struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
   b->shader = shader;
   b->value_id_bound = value_id_bound;
   b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
   exec_list_make_empty(&b->functions);

   /* Handle all the preamble instructions */
   words = vtn_foreach_instruction(b, words, word_end,
                                   vtn_handle_preamble_instruction);

   /* Do a very quick CFG analysis pass */
   vtn_foreach_instruction(b, words, word_end,
                           vtn_handle_first_cfg_pass_instruction);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      b->impl = nir_function_impl_create(func->overload);
      b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      b->block_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
      nir_builder_init(&b->nb, b->impl);
      nir_builder_insert_after_cf_list(&b->nb, &b->impl->body);
      vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
      vtn_foreach_instruction(b, func->start_block->label, func->end,
                              vtn_handle_phi_second_pass);
   }

   ralloc_free(b);

   return shader;
}