/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"

void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
{
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);
   }

#ifndef NDEBUG
   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);
#endif
}

void
vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
{
   va_list args;
   char *msg;

   va_start(args, fmt);
   msg = ralloc_vasprintf(NULL, fmt, args);
   va_end(args);

   vtn_log(b, level, spirv_offset, msg);

   ralloc_free(msg);
}
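
/* Example (a hypothetical driver-side hook, for illustration only; the
 * callback name and body below are not part of this file):
 *
 *    static void
 *    my_spirv_debug_cb(void *private_data,
 *                      enum nir_spirv_debug_level level,
 *                      size_t spirv_offset, const char *message)
 *    {
 *       fprintf(stderr, "[spirv+%zu] %s\n", spirv_offset, message);
 *    }
 *
 * Installing such a function as debug.func in the spirv_to_nir options
 * (together with a matching debug.private_data) makes vtn_log() and
 * vtn_logf() hand every message back to the driver, in addition to the
 * stderr output that debug builds emit for warnings and errors.
 */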

static void
vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

#ifndef NDEBUG
   ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
#endif

   ralloc_asprintf_append(&msg, "    ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
                          b->spirv_offset);

   if (b->file) {
      ralloc_asprintf_append(&msg,
                             "\n    in SPIR-V source file %s, line %d, col %d",
                             b->file, b->line, b->col);
   }

   vtn_log(b, level, b->spirv_offset, msg);

   ralloc_free(msg);
}

void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);
   va_end(args);
}

struct spec_constant_value {
   bool is_double;
   union {
      uint32_t data32;
      uint64_t data64;
   };
};

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
   } else {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));

         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else {
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         }
      }
   }

   return val;
}

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
{
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);

   if (entry)
      return entry->data;

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
      } else {
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;
         }
      }
      break;
   }

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      break;
   }

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
                                             elem_type);
      }
      break;
   }

   default:
      unreachable("bad constant type");
   }

   return val;
}

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->const_type);

   case vtn_value_type_ssa:
      return val->ssa;

   case vtn_value_type_pointer:
      assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);
      return ssa;

   default:
      unreachable("Invalid type for an SSA value");
   }
}

static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
{
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
   if (words_used) {
      /* Amount of space taken by the string (including the null) */
      unsigned len = strlen(dup) + 1;
      *words_used = DIV_ROUND_UP(len, sizeof(*words));
   }
   return dup;
}
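
/* As an example of the packing above (an illustration, not used by the
 * code): the 13 bytes of "GLSL.std.450", including the terminating NUL,
 * occupy DIV_ROUND_UP(13, 4) = 4 words, with the unused bytes of the last
 * word zero-padded as the SPIR-V spec requires.
 */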

const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
{
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   const uint32_t *w = start;
   while (w < end) {
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

      switch (opcode) {
      case SpvOpNop:
         break; /* Do nothing */

      case SpvOpLine:
         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
         b->line = w[2];
         b->col = w[3];
         break;

      case SpvOpNoLine:
         b->file = NULL;
         b->line = -1;
         b->col = -1;
         break;

      default:
         if (!handler(b, opcode, w, count))
            return w;
         break;
      }

      w += count;
   }

   b->spirv_offset = 0;
   b->file = NULL;
   b->line = -1;
   b->col = -1;

   assert(w == end);
   return w;
}
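
/* For reference: each SPIR-V instruction packs its total word count into
 * the high 16 bits of its first word and its opcode into the low 16 bits,
 * which is what the SpvOpCodeMask / SpvWordCountShift extraction above
 * implements.  For example, the word 0x0004003d encodes a 4-word OpLoad
 * (opcode 61 == 0x3d).
 */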

static void
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else {
         unreachable("Unsupported extension");
      }
      break;
   }

   case SpvOpExtInst: {
      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);
      (void)handled;
      assert(handled);
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}

static void
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         assert(parent_member == -1);
         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
      } else {
         /* Not a decoration */
         continue;
      }

      if (dec->group) {
         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,
                                    cb, data);
      } else {
         cb(b, base_value, member, dec, data);
      }
   }
}

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value.  If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
void
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
{
   _foreach_decoration_helper(b, value, -1, value, cb, data);
}
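
/* A minimal example callback (hypothetical, for illustration only):
 *
 *    static void
 *    count_decorations_cb(struct vtn_builder *b, struct vtn_value *val,
 *                         int member, const struct vtn_decoration *dec,
 *                         void *data)
 *    {
 *       (*(unsigned *)data)++;
 *    }
 *
 *    unsigned num_decorations = 0;
 *    vtn_foreach_decoration(b, value, count_decorations_cb,
 *                           &num_decorations);
 *
 * This counts every decoration on "value", including ones applied
 * indirectly through decoration groups.
 */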

void
vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
{
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)
         continue;

      assert(dec->group == NULL);
      cb(b, value, dec, data);
   }
}

static void
vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
{
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];
   w += 2;

   switch (opcode) {
   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);
      break;

   case SpvOpDecorate:
   case SpvOpMemberDecorate:
   case SpvOpExecutionMode: {
      struct vtn_value *val = &b->values[target];

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      switch (opcode) {
      case SpvOpDecorate:
         dec->scope = VTN_DEC_DECORATION;
         break;
      case SpvOpMemberDecorate:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         break;
      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;
         break;
      default:
         unreachable("Invalid decoration opcode");
      }
      dec->decoration = *(w++);
      dec->literals = w;

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;
      break;
   }

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         dec->group = group;
         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
         } else {
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
         }

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;
      }
      break;
   }

   default:
      unreachable("Unhandled opcode");
   }
}
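
/* For example, "OpMemberDecorate %S 1 Offset 16" reaches the handler above
 * as w[] = { <count|opcode>, <id of %S>, 1, SpvDecorationOffset, 16 }, so
 * the resulting vtn_decoration has scope = VTN_DEC_STRUCT_MEMBER0 + 1,
 * decoration = SpvDecorationOffset, and literals pointing at { 16 }.
 */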

struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/* does a shallow copy of a vtn_type */

static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
   struct vtn_type *dest = ralloc(b, struct vtn_type);
   *dest = *src;

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
      /* Nothing more to do */
      break;

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));
      break;

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
      break;
   }

   return dest;
}

static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;
   }

   assert(glsl_type_is_matrix(type->type));

   return type;
}

static void
struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
{
   struct member_decoration_ctx *ctx = void_ctx;

   if (member < 0)
      return;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
      break;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
      break;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
      break;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
      break;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      assert(dec->literals[0] == 0);
      break;
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
      break;
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
      break;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
      break;
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
      break;
   case SpvDecorationColMajor:
      break; /* Nothing to do here.  Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;
      break;

   case SpvDecorationPatch:
      break;

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
{
   if (dec->decoration != SpvDecorationMatrixStride)
      return;
   assert(member >= 0);

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];
   } else {
      assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];
   }
}
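
/* Concretely (an illustration, assuming std140-style layout): a float mat4
 * with MatrixStride 16 keeps stride == 16 on the matrix itself when it is
 * column-major (columns are 16 bytes apart).  When it is row-major, the
 * strides are swapped above: columns end up 4 bytes apart while the
 * components of a single column are 16 bytes apart, since MatrixStride
 * then describes the distance between rows.
 */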

static void
type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
{
   struct vtn_type *type = val->type;

   if (member != -1)
      return;

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      assert(type->base_type == vtn_base_type_matrix ||
             type->base_type == vtn_base_type_array ||
             type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
      break;
   case SpvDecorationBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->block = true;
      break;
   case SpvDecorationBufferBlock:
      assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
      break;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */
      break;

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationStream:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
      break;

   default:
      unreachable("Unhandled decoration");
   }
}

static unsigned
translate_image_format(SpvImageFormat format)
{
   switch (format) {
   case SpvImageFormatUnknown:      return 0;      /* GL_NONE */
   case SpvImageFormatRgba32f:      return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f:      return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f:         return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8:        return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm:   return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f:        return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f:        return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f:         return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16:       return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2:      return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16:         return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8:          return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16:          return 0x822A; /* GL_R16 */
   case SpvImageFormatR8:           return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm:  return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm:    return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm:     return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm:     return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm:      return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i:      return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i:      return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i:       return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i:         return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i:        return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i:        return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i:         return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i:         return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i:          return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui:     return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui:     return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui:      return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui:        return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui:    return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui:       return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui:       return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui:        return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui:        return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui:         return 0x8232; /* GL_R8UI */
   default:
      unreachable("Invalid image format");
      return 0;
   }
}

static void
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->val = val;

   switch (opcode) {
   case SpvOpTypeVoid:
      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();
      break;
   case SpvOpTypeBool:
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      break;
   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
      if (bit_size == 64)
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
      else
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
      break;
   }
   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
      break;
   }

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      assert(glsl_type_is_scalar(base->type));
      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;
      break;
   }

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      assert(glsl_type_is_vector(base->type));
      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
                                         columns);
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
      } else {
         val->type->length =
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
      }

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;
      break;
   }

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),
            .location = -1,
         };
      }

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,
         .fields = fields,
         .type = val->type
      };

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);
      break;
   }

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;
      }
      break;
   }

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
      }
      break;
   }

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct glsl_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type->type;

      assert(glsl_type_is_vector_or_scalar(sampled_type));

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D:          dim = GLSL_SAMPLER_DIM_1D;      break;
      case SpvDim2D:          dim = GLSL_SAMPLER_DIM_2D;      break;
      case SpvDim3D:          dim = GLSL_SAMPLER_DIM_3D;      break;
      case SpvDimCube:        dim = GLSL_SAMPLER_DIM_CUBE;    break;
      case SpvDimRect:        dim = GLSL_SAMPLER_DIM_RECT;    break;
      case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
      default:
         unreachable("Invalid SPIR-V Sampler dimension");
      }

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

      if (count > 9)
         val->type->access_qualifier = w[9];
      else
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

      if (multisampled) {
         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
         else
            unreachable("Unsupported multisampled image type");
      }

      val->type->image_format = translate_image_format(format);

      if (sampled == 1) {
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                             glsl_get_base_type(sampled_type));
      } else if (sampled == 2) {
         assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array,
                                           glsl_get_base_type(sampled_type));
      } else {
         unreachable("We need to know if the image will be sampled");
      }
      break;
   }

   case SpvOpTypeSampledImage:
      val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
      break;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter.  It gets
       * thrown away the moment you combine it with an image.  What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();
      break;

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
   case SpvOpTypePipe:
   default:
      unreachable("Unhandled opcode");
   }

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}

static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
{
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */
   if (!type)
      return c;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here.  It's already initialized to zero */
      break;

   case GLSL_TYPE_ARRAY:
      assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];
      break;

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
      }
      break;

   default:
      unreachable("Invalid type for null constant");
   }

   return c;
}

static void
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)
      return;

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
         else
            const_value->data32 = b->specializations[i].data32;
         return;
      }
   }
}

static uint32_t
get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
{
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data32;
}

static uint64_t
get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
{
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
   return data.data64;
}
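
/* For example (illustrative values): with b->specializations containing an
 * entry { .id = 7, .data32 = 4 }, a spec constant decorated "SpecId 7"
 * resolves to 4 here; with no matching entry it keeps the default value
 * encoded in the SPIR-V module.
 */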

static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
{
   assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)
      return;

   assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}

static void
vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
   val->constant = rzalloc(b, nir_constant);
   switch (opcode) {
   case SpvOpConstantTrue:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_TRUE;
      break;
   case SpvOpConstantFalse:
      assert(val->const_type == glsl_bool_type());
      val->constant->values[0].u32[0] = NIR_FALSE;
      break;

   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      assert(val->const_type == glsl_bool_type());
      uint32_t int_val =
         get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
      break;
   }

   case SpvOpConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64) {
         val->constant->values->u32[0] = w[3];
         val->constant->values->u32[1] = w[4];
      } else {
         assert(bit_size == 32);
         val->constant->values->u32[0] = w[3];
      }
      break;
   }
   case SpvOpSpecConstant: {
      assert(glsl_type_is_scalar(val->const_type));
      int bit_size = glsl_get_bit_size(val->const_type);
      if (bit_size == 64)
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
      else
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
      break;
   }
   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++)
         elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;

      switch (glsl_get_base_type(val->const_type)) {
      case GLSL_TYPE_UINT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT64:
      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
      case GLSL_TYPE_DOUBLE: {
         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++) {
               if (bit_size == 64) {
                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               } else {
                  assert(bit_size == 32);
                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               }
            }
         }
         ralloc_free(elems);
         break;
      }
      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;
         break;

      default:
         unreachable("Unsupported type for constants");
      }
      break;
   }

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      switch (opcode) {
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         assert(v0->value_type == vtn_value_type_constant ||
                v0->value_type == vtn_value_type_undef);
         assert(v1->value_type == vtn_value_type_constant ||
                v1->value_type == vtn_value_type_undef);

         unsigned len0 = v0->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v0->const_type) :
                         glsl_get_vector_elements(v0->type->type);
         unsigned len1 = v1->value_type == vtn_value_type_constant ?
                         glsl_get_vector_elements(v1->const_type) :
                         glsl_get_vector_elements(v1->type->type);

         assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->const_type);
         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v0->const_type) :
                              glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
                              glsl_get_bit_size(v1->const_type) :
                              glsl_get_bit_size(v1->type->type);

         assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
               else
                  val->constant->values[0].u64[j] = u64[comp];
            }
         } else {
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            }
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];
            }

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
               else
                  val->constant->values[0].u32[j] = u32[comp];
            }
         }
         break;
      }

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            deref_start = 5;
            c = &comp->constant;
         } else {
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            deref_start = 6;
            val->constant = nir_constant_clone(comp->constant,
                                               (nir_variable *)b);
            c = &val->constant;
         }

         int elem = -1;
         int col = 0;
         const struct glsl_type *type = comp->const_type;
         for (unsigned i = deref_start; i < count; i++) {
            switch (glsl_get_base_type(type)) {
            case GLSL_TYPE_UINT:
            case GLSL_TYPE_INT:
            case GLSL_TYPE_UINT64:
            case GLSL_TYPE_INT64:
            case GLSL_TYPE_FLOAT:
            case GLSL_TYPE_DOUBLE:
            case GLSL_TYPE_BOOL:
               /* If we hit this granularity, we're picking off an element */
               if (glsl_type_is_matrix(type)) {
                  assert(col == 0 && elem == -1);
                  col = w[i];
                  elem = 0;
                  type = glsl_get_column_type(type);
               } else {
                  assert(elem <= 0 && glsl_type_is_vector(type));
                  elem = w[i];
                  type = glsl_scalar_type(glsl_get_base_type(type));
               }
               continue;

            case GLSL_TYPE_ARRAY:
               c = &(*c)->elements[w[i]];
               type = glsl_get_array_element(type);
               continue;

            case GLSL_TYPE_STRUCT:
               c = &(*c)->elements[w[i]];
               type = glsl_get_struct_field(type, w[i]);
               continue;

            default:
               unreachable("Invalid constant type");
            }
         }

         if (opcode == SpvOpCompositeExtract) {
            if (elem == -1) {
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                  } else {
                     assert(bit_size == 32);
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                  }
            }
         } else {
            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            assert(insert->const_type == type);
            if (elem == -1) {
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
               unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
                  if (bit_size == 64) {
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                  } else {
                     assert(bit_size == 32);
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                  }
            }
         }
         break;
      }

      default: {
         bool swap;
         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
         nir_alu_type src_alu_type = dst_alu_type;
         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);

         unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
            glsl_get_bit_size(val->const_type);

         nir_const_value src[4];
         assert(count <= 7);
         for (unsigned i = 0; i < count - 4; i++) {
            nir_constant *c =
               vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;

            unsigned j = swap ? 1 - i : i;
            assert(bit_size == 32);
            src[j] = c->values[0];
         }

         val->constant->values[0] =
            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      } /* default */
      }
      break;
   }

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->const_type);
      break;

   case SpvOpConstantSampler:
      unreachable("OpConstantSampler requires Kernel Capability");
      break;

   default:
      unreachable("Unhandled opcode");
   }

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}

static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
   for (unsigned i = 0; i < call->num_params; i++) {
      unsigned arg_id = w[4 + i];
      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
      if (arg->value_type == vtn_value_type_pointer &&
          arg->pointer->ptr_type->type == NULL) {
         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
         call->params[i] = nir_deref_var_clone(d, call);
      } else {
         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);

         /* Make a temporary to store the argument in */
         nir_variable *tmp =
            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
         call->params[i] = nir_deref_var_create(call, tmp);

         vtn_local_store(b, arg_ssa, call->params[i]);
      }
   }

   nir_variable *out_tmp = NULL;
   assert(res_type->type == callee->return_type);
   if (!glsl_type_is_void(callee->return_type)) {
      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
                                          "out_tmp");
      call->return_deref = nir_deref_var_create(call, out_tmp);
   }

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (glsl_type_is_void(callee->return_type)) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
   }
}

struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
   val->type = type;

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_INT:
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
            break;
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
            break;
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            break;
         default:
            unreachable("unknown base type");
1538 }
1539
1540 val->elems[i] = vtn_create_ssa_value(b, child_type);
1541 }
1542 }
1543
1544 return val;
1545 }
1546
1547 static nir_tex_src
1548 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
1549 {
1550 nir_tex_src src;
1551 src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
1552 src.src_type = type;
1553 return src;
1554 }
1555
1556 static void
1557 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
1558 const uint32_t *w, unsigned count)
1559 {
1560 if (opcode == SpvOpSampledImage) {
1561 struct vtn_value *val =
1562 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
1563 val->sampled_image = ralloc(b, struct vtn_sampled_image);
1564 val->sampled_image->type =
1565 vtn_value(b, w[1], vtn_value_type_type)->type;
1566 val->sampled_image->image =
1567 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
1568 val->sampled_image->sampler =
1569 vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
1570 return;
1571 } else if (opcode == SpvOpImage) {
1572 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
1573 struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
1574 if (src_val->value_type == vtn_value_type_sampled_image) {
1575 val->pointer = src_val->sampled_image->image;
1576 } else {
1577 assert(src_val->value_type == vtn_value_type_pointer);
1578 val->pointer = src_val->pointer;
1579 }
1580 return;
1581 }
1582
1583 struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
1584 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1585
1586 struct vtn_sampled_image sampled;
1587 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
1588 if (sampled_val->value_type == vtn_value_type_sampled_image) {
1589 sampled = *sampled_val->sampled_image;
1590 } else {
1591 assert(sampled_val->value_type == vtn_value_type_pointer);
1592 sampled.type = sampled_val->pointer->type;
1593 sampled.image = NULL;
1594 sampled.sampler = sampled_val->pointer;
1595 }
1596
1597 const struct glsl_type *image_type = sampled.type->type;
1598 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
1599 const bool is_array = glsl_sampler_type_is_array(image_type);
1600 const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
1601
1602 /* Figure out the base texture operation */
1603 nir_texop texop;
1604 switch (opcode) {
1605 case SpvOpImageSampleImplicitLod:
1606 case SpvOpImageSampleDrefImplicitLod:
1607 case SpvOpImageSampleProjImplicitLod:
1608 case SpvOpImageSampleProjDrefImplicitLod:
1609 texop = nir_texop_tex;
1610 break;
1611
1612 case SpvOpImageSampleExplicitLod:
1613 case SpvOpImageSampleDrefExplicitLod:
1614 case SpvOpImageSampleProjExplicitLod:
1615 case SpvOpImageSampleProjDrefExplicitLod:
1616 texop = nir_texop_txl;
1617 break;
1618
1619 case SpvOpImageFetch:
1620 if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
1621 texop = nir_texop_txf_ms;
1622 } else {
1623 texop = nir_texop_txf;
1624 }
1625 break;
1626
1627 case SpvOpImageGather:
1628 case SpvOpImageDrefGather:
1629 texop = nir_texop_tg4;
1630 break;
1631
1632 case SpvOpImageQuerySizeLod:
1633 case SpvOpImageQuerySize:
1634 texop = nir_texop_txs;
1635 break;
1636
1637 case SpvOpImageQueryLod:
1638 texop = nir_texop_lod;
1639 break;
1640
1641 case SpvOpImageQueryLevels:
1642 texop = nir_texop_query_levels;
1643 break;
1644
1645 case SpvOpImageQuerySamples:
1646 texop = nir_texop_texture_samples;
1647 break;
1648
1649 default:
1650 unreachable("Unhandled opcode");
1651 }
1652
1653 nir_tex_src srcs[8]; /* 8 should be enough */
1654 nir_tex_src *p = srcs;
1655
1656 unsigned idx = 4;
1657
1658 struct nir_ssa_def *coord;
1659 unsigned coord_components;
1660 switch (opcode) {
1661 case SpvOpImageSampleImplicitLod:
1662 case SpvOpImageSampleExplicitLod:
1663 case SpvOpImageSampleDrefImplicitLod:
1664 case SpvOpImageSampleDrefExplicitLod:
1665 case SpvOpImageSampleProjImplicitLod:
1666 case SpvOpImageSampleProjExplicitLod:
1667 case SpvOpImageSampleProjDrefImplicitLod:
1668 case SpvOpImageSampleProjDrefExplicitLod:
1669 case SpvOpImageFetch:
1670 case SpvOpImageGather:
1671 case SpvOpImageDrefGather:
1672 case SpvOpImageQueryLod: {
1673 /* All these types have the coordinate as their first real argument */
1674 switch (sampler_dim) {
1675 case GLSL_SAMPLER_DIM_1D:
1676 case GLSL_SAMPLER_DIM_BUF:
1677 coord_components = 1;
1678 break;
1679 case GLSL_SAMPLER_DIM_2D:
1680 case GLSL_SAMPLER_DIM_RECT:
1681 case GLSL_SAMPLER_DIM_MS:
1682 coord_components = 2;
1683 break;
1684 case GLSL_SAMPLER_DIM_3D:
1685 case GLSL_SAMPLER_DIM_CUBE:
1686 coord_components = 3;
1687 break;
1688 default:
1689 unreachable("Invalid sampler type");
1690 }
1691
1692 if (is_array && texop != nir_texop_lod)
1693 coord_components++;
1694
1695 coord = vtn_ssa_value(b, w[idx++])->def;
1696 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
1697 (1 << coord_components) - 1));
1698 p->src_type = nir_tex_src_coord;
1699 p++;
1700 break;
1701 }
1702
1703 default:
1704 coord = NULL;
1705 coord_components = 0;
1706 break;
1707 }
1708
1709 switch (opcode) {
1710 case SpvOpImageSampleProjImplicitLod:
1711 case SpvOpImageSampleProjExplicitLod:
1712 case SpvOpImageSampleProjDrefImplicitLod:
1713 case SpvOpImageSampleProjDrefExplicitLod:
1714 /* These have the projector as the last coordinate component */
1715 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
1716 p->src_type = nir_tex_src_projector;
1717 p++;
1718 break;
1719
1720 default:
1721 break;
1722 }
1723
1724 unsigned gather_component = 0;
1725 switch (opcode) {
1726 case SpvOpImageSampleDrefImplicitLod:
1727 case SpvOpImageSampleDrefExplicitLod:
1728 case SpvOpImageSampleProjDrefImplicitLod:
1729 case SpvOpImageSampleProjDrefExplicitLod:
1730 case SpvOpImageDrefGather:
1731 /* These all have an explicit depth value as their next source */
1732 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
1733 break;
1734
1735 case SpvOpImageGather:
1736 /* This has a component as its next source */
1737 gather_component =
1738 vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
1739 break;
1740
1741 default:
1742 break;
1743 }
1744
1745 /* For OpImageQuerySizeLod, we always have an LOD */
1746 if (opcode == SpvOpImageQuerySizeLod)
1747 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1748
1749 /* Now we need to handle some number of optional arguments */
1750 const struct vtn_ssa_value *gather_offsets = NULL;
1751 if (idx < count) {
1752 uint32_t operands = w[idx++];
1753
1754 if (operands & SpvImageOperandsBiasMask) {
1755 assert(texop == nir_texop_tex);
1756 texop = nir_texop_txb;
1757 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
1758 }
1759
1760 if (operands & SpvImageOperandsLodMask) {
1761 assert(texop == nir_texop_txl || texop == nir_texop_txf ||
1762 texop == nir_texop_txs);
1763 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1764 }
1765
1766 if (operands & SpvImageOperandsGradMask) {
1767 assert(texop == nir_texop_txl);
1768 texop = nir_texop_txd;
1769 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
1770 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
1771 }
1772
1773 if (operands & SpvImageOperandsOffsetMask ||
1774 operands & SpvImageOperandsConstOffsetMask)
1775 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
1776
1777 if (operands & SpvImageOperandsConstOffsetsMask) {
1778 gather_offsets = vtn_ssa_value(b, w[idx++]);
1779 (*p++) = (nir_tex_src){};
1780 }
1781
1782 if (operands & SpvImageOperandsSampleMask) {
1783 assert(texop == nir_texop_txf_ms);
1784 texop = nir_texop_txf_ms;
1785 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
1786 }
1787 }
1788 /* We should have now consumed exactly all of the arguments */
1789 assert(idx == count);
1790
1791 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
1792 instr->op = texop;
1793
1794 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
1795
1796 instr->coord_components = coord_components;
1797 instr->sampler_dim = sampler_dim;
1798 instr->is_array = is_array;
1799 instr->is_shadow = is_shadow;
1800 instr->is_new_style_shadow =
1801 is_shadow && glsl_get_components(ret_type->type) == 1;
1802 instr->component = gather_component;
1803
1804 switch (glsl_get_sampler_result_type(image_type)) {
1805 case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
1806 case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
1807 case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
1808 case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
1809 default:
1810 unreachable("Invalid base type for sampler result");
1811 }
1812
1813 nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
1814 nir_deref_var *texture;
1815 if (sampled.image) {
1816 nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
1817 texture = image;
1818 } else {
1819 texture = sampler;
1820 }
1821
1822 instr->texture = nir_deref_var_clone(texture, instr);
1823
1824 switch (instr->op) {
1825 case nir_texop_tex:
1826 case nir_texop_txb:
1827 case nir_texop_txl:
1828 case nir_texop_txd:
1829 case nir_texop_tg4:
1830 /* These operations require a sampler */
1831 instr->sampler = nir_deref_var_clone(sampler, instr);
1832 break;
1833 case nir_texop_txf:
1834 case nir_texop_txf_ms:
1835 case nir_texop_txs:
1836 case nir_texop_lod:
1837 case nir_texop_query_levels:
1838 case nir_texop_texture_samples:
1839 case nir_texop_samples_identical:
1840 /* These don't */
1841 instr->sampler = NULL;
1842 break;
1843 case nir_texop_txf_ms_mcs:
1844 unreachable("unexpected nir_texop_txf_ms_mcs");
1845 }
1846
1847 nir_ssa_dest_init(&instr->instr, &instr->dest,
1848 nir_tex_instr_dest_size(instr), 32, NULL);
1849
1850 assert(glsl_get_vector_elements(ret_type->type) ==
1851 nir_tex_instr_dest_size(instr));
1852
1853 nir_ssa_def *def;
1854 nir_instr *instruction;
1855 if (gather_offsets) {
1856 assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
1857 assert(glsl_get_length(gather_offsets->type) == 4);
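/* A single tg4 only carries one offset source here, while ConstOffsets
 * supplies one offset per gathered texel. Emit four copies of the
 * gather, one per offset, and assemble the result from their .w
 * components below.
 */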
1858 nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
1859
1860 /* Copy the current instruction 4x */
1861 for (uint32_t i = 1; i < 4; i++) {
1862 instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
1863 instrs[i]->op = instr->op;
1864 instrs[i]->coord_components = instr->coord_components;
1865 instrs[i]->sampler_dim = instr->sampler_dim;
1866 instrs[i]->is_array = instr->is_array;
1867 instrs[i]->is_shadow = instr->is_shadow;
1868 instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
1869 instrs[i]->component = instr->component;
1870 instrs[i]->dest_type = instr->dest_type;
1871 instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
1872 instrs[i]->sampler = NULL;
1873
1874 memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
1875
1876 nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
1877 nir_tex_instr_dest_size(instr), 32, NULL);
1878 }
1879
1880 /* Fill in the last argument with the offset from the passed-in offsets
1881 * and insert the instruction into the stream.
1882 */
1883 for (uint32_t i = 0; i < 4; i++) {
1884 nir_tex_src src;
1885 src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
1886 src.src_type = nir_tex_src_offset;
1887 instrs[i]->src[instrs[i]->num_srcs - 1] = src;
1888 nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
1889 }
1890
1891 /* Combine the results of the 4 instructions by taking their .w
1892 * components
1893 */
1894 nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
1895 nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
1896 vec4->dest.write_mask = 0xf;
1897 for (uint32_t i = 0; i < 4; i++) {
1898 vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
1899 vec4->src[i].swizzle[0] = 3;
1900 }
1901 def = &vec4->dest.dest.ssa;
1902 instruction = &vec4->instr;
1903 } else {
1904 def = &instr->dest.ssa;
1905 instruction = &instr->instr;
1906 }
1907
1908 val->ssa = vtn_create_ssa_value(b, ret_type->type);
1909 val->ssa->def = def;
1910
1911 nir_builder_instr_insert(&b->nb, instruction);
1912 }
1913
1914 static void
1915 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
1916 const uint32_t *w, nir_src *src)
1917 {
1918 switch (opcode) {
1919 case SpvOpAtomicIIncrement:
1920 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
1921 break;
1922
1923 case SpvOpAtomicIDecrement:
1924 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
1925 break;
1926
1927 case SpvOpAtomicISub:
1928 src[0] =
1929 nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
1930 break;
1931
1932 case SpvOpAtomicCompareExchange:
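/* SPIR-V passes Value (w[7]) before Comparator (w[8]); the NIR
 * comp_swap intrinsics expect the comparison value first, hence
 * the swapped order.
 */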
1933 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
1934 src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
1935 break;
1936
1937 case SpvOpAtomicExchange:
1938 case SpvOpAtomicIAdd:
1939 case SpvOpAtomicSMin:
1940 case SpvOpAtomicUMin:
1941 case SpvOpAtomicSMax:
1942 case SpvOpAtomicUMax:
1943 case SpvOpAtomicAnd:
1944 case SpvOpAtomicOr:
1945 case SpvOpAtomicXor:
1946 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
1947 break;
1948
1949 default:
1950 unreachable("Invalid SPIR-V atomic");
1951 }
1952 }
1953
1954 static nir_ssa_def *
1955 get_image_coord(struct vtn_builder *b, uint32_t value)
1956 {
1957 struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
1958
1959 /* The image_load_store intrinsics assume a 4-dim coordinate */
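/* e.g. a 2-component coordinate (x, y) is extended to (x, y, y, y);
 * the repeated trailing components are padding the intrinsic ignores.
 */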
1960 unsigned dim = glsl_get_vector_elements(coord->type);
1961 unsigned swizzle[4];
1962 for (unsigned i = 0; i < 4; i++)
1963 swizzle[i] = MIN2(i, dim - 1);
1964
1965 return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
1966 }
1967
1968 static void
1969 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
1970 const uint32_t *w, unsigned count)
1971 {
1972 /* Just get this one out of the way */
1973 if (opcode == SpvOpImageTexelPointer) {
1974 struct vtn_value *val =
1975 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
1976 val->image = ralloc(b, struct vtn_image_pointer);
1977
1978 val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
1979 val->image->coord = get_image_coord(b, w[4]);
1980 val->image->sample = vtn_ssa_value(b, w[5])->def;
1981 return;
1982 }
1983
1984 struct vtn_image_pointer image;
1985
1986 switch (opcode) {
1987 case SpvOpAtomicExchange:
1988 case SpvOpAtomicCompareExchange:
1989 case SpvOpAtomicCompareExchangeWeak:
1990 case SpvOpAtomicIIncrement:
1991 case SpvOpAtomicIDecrement:
1992 case SpvOpAtomicIAdd:
1993 case SpvOpAtomicISub:
1994 case SpvOpAtomicLoad:
1995 case SpvOpAtomicSMin:
1996 case SpvOpAtomicUMin:
1997 case SpvOpAtomicSMax:
1998 case SpvOpAtomicUMax:
1999 case SpvOpAtomicAnd:
2000 case SpvOpAtomicOr:
2001 case SpvOpAtomicXor:
2002 image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
2003 break;
2004
2005 case SpvOpAtomicStore:
2006 image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
2007 break;
2008
2009 case SpvOpImageQuerySize:
2010 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2011 image.coord = NULL;
2012 image.sample = NULL;
2013 break;
2014
2015 case SpvOpImageRead:
2016 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2017 image.coord = get_image_coord(b, w[4]);
2018
2019 if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
2020 assert(w[5] == SpvImageOperandsSampleMask);
2021 image.sample = vtn_ssa_value(b, w[6])->def;
2022 } else {
2023 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2024 }
2025 break;
2026
2027 case SpvOpImageWrite:
2028 image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2029 image.coord = get_image_coord(b, w[2]);
2030
2031 /* texel = w[3] */
2032
2033 if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
2034 assert(w[4] == SpvImageOperandsSampleMask);
2035 image.sample = vtn_ssa_value(b, w[5])->def;
2036 } else {
2037 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2038 }
2039 break;
2040
2041 default:
2042 unreachable("Invalid image opcode");
2043 }
2044
2045 nir_intrinsic_op op;
2046 switch (opcode) {
2047 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
2048 OP(ImageQuerySize, size)
2049 OP(ImageRead, load)
2050 OP(ImageWrite, store)
2051 OP(AtomicLoad, load)
2052 OP(AtomicStore, store)
2053 OP(AtomicExchange, atomic_exchange)
2054 OP(AtomicCompareExchange, atomic_comp_swap)
2055 OP(AtomicIIncrement, atomic_add)
2056 OP(AtomicIDecrement, atomic_add)
2057 OP(AtomicIAdd, atomic_add)
2058 OP(AtomicISub, atomic_add)
2059 OP(AtomicSMin, atomic_min)
2060 OP(AtomicUMin, atomic_min)
2061 OP(AtomicSMax, atomic_max)
2062 OP(AtomicUMax, atomic_max)
2063 OP(AtomicAnd, atomic_and)
2064 OP(AtomicOr, atomic_or)
2065 OP(AtomicXor, atomic_xor)
2066 #undef OP
2067 default:
2068 unreachable("Invalid image opcode");
2069 }
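/* Note that IIncrement, IDecrement and ISub all map to atomic_add;
 * fill_common_atomic_sources() supplies +1, -1 or the negated operand
 * respectively.
 */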
2070
2071 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
2072
2073 nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
2074 intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
2075
2076 /* ImageQuerySize doesn't take any extra parameters */
2077 if (opcode != SpvOpImageQuerySize) {
2078 /* The image coordinate is always 4 components but we may not have that
2079 * many. Swizzle to compensate.
2080 */
2081 unsigned swiz[4];
2082 for (unsigned i = 0; i < 4; i++)
2083 swiz[i] = i < image.coord->num_components ? i : 0;
2084 intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
2085 swiz, 4, false));
2086 intrin->src[1] = nir_src_for_ssa(image.sample);
2087 }
2088
2089 switch (opcode) {
2090 case SpvOpAtomicLoad:
2091 case SpvOpImageQuerySize:
2092 case SpvOpImageRead:
2093 break;
2094 case SpvOpAtomicStore:
2095 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2096 break;
2097 case SpvOpImageWrite:
2098 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
2099 break;
2100
2101 case SpvOpAtomicCompareExchange:
2102 case SpvOpAtomicIIncrement:
2103 case SpvOpAtomicIDecrement:
2104 case SpvOpAtomicExchange:
2105 case SpvOpAtomicIAdd:
2106 case SpvOpAtomicISub:
2107 case SpvOpAtomicSMin:
2108 case SpvOpAtomicUMin:
2109 case SpvOpAtomicSMax:
2110 case SpvOpAtomicUMax:
2111 case SpvOpAtomicAnd:
2112 case SpvOpAtomicOr:
2113 case SpvOpAtomicXor:
2114 fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
2115 break;
2116
2117 default:
2118 unreachable("Invalid image opcode");
2119 }
2120
2121 if (opcode != SpvOpImageWrite) {
2122 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2123 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2124
2125 unsigned dest_components =
2126 nir_intrinsic_infos[intrin->intrinsic].dest_components;
2127 if (intrin->intrinsic == nir_intrinsic_image_size) {
2128 dest_components = intrin->num_components =
2129 glsl_get_vector_elements(type->type);
2130 }
2131
2132 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
2133 dest_components, 32, NULL);
2134
2135 nir_builder_instr_insert(&b->nb, &intrin->instr);
2136
2137 val->ssa = vtn_create_ssa_value(b, type->type);
2138 val->ssa->def = &intrin->dest.ssa;
2139 } else {
2140 nir_builder_instr_insert(&b->nb, &intrin->instr);
2141 }
2142 }
2143
2144 static nir_intrinsic_op
2145 get_ssbo_nir_atomic_op(SpvOp opcode)
2146 {
2147 switch (opcode) {
2148 case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
2149 case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
2150 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
2151 OP(AtomicExchange, atomic_exchange)
2152 OP(AtomicCompareExchange, atomic_comp_swap)
2153 OP(AtomicIIncrement, atomic_add)
2154 OP(AtomicIDecrement, atomic_add)
2155 OP(AtomicIAdd, atomic_add)
2156 OP(AtomicISub, atomic_add)
2157 OP(AtomicSMin, atomic_imin)
2158 OP(AtomicUMin, atomic_umin)
2159 OP(AtomicSMax, atomic_imax)
2160 OP(AtomicUMax, atomic_umax)
2161 OP(AtomicAnd, atomic_and)
2162 OP(AtomicOr, atomic_or)
2163 OP(AtomicXor, atomic_xor)
2164 #undef OP
2165 default:
2166 unreachable("Invalid SSBO atomic");
2167 }
2168 }
2169
2170 static nir_intrinsic_op
2171 get_shared_nir_atomic_op(SpvOp opcode)
2172 {
2173 switch (opcode) {
2174 case SpvOpAtomicLoad: return nir_intrinsic_load_var;
2175 case SpvOpAtomicStore: return nir_intrinsic_store_var;
2176 #define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
2177 OP(AtomicExchange, atomic_exchange)
2178 OP(AtomicCompareExchange, atomic_comp_swap)
2179 OP(AtomicIIncrement, atomic_add)
2180 OP(AtomicIDecrement, atomic_add)
2181 OP(AtomicIAdd, atomic_add)
2182 OP(AtomicISub, atomic_add)
2183 OP(AtomicSMin, atomic_imin)
2184 OP(AtomicUMin, atomic_umin)
2185 OP(AtomicSMax, atomic_imax)
2186 OP(AtomicUMax, atomic_umax)
2187 OP(AtomicAnd, atomic_and)
2188 OP(AtomicOr, atomic_or)
2189 OP(AtomicXor, atomic_xor)
2190 #undef OP
2191 default:
2192 unreachable("Invalid shared atomic");
2193 }
2194 }
2195
2196 static void
2197 vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
2198 const uint32_t *w, unsigned count)
2199 {
2200 struct vtn_pointer *ptr;
2201 nir_intrinsic_instr *atomic;
2202
2203 switch (opcode) {
2204 case SpvOpAtomicLoad:
2205 case SpvOpAtomicExchange:
2206 case SpvOpAtomicCompareExchange:
2207 case SpvOpAtomicCompareExchangeWeak:
2208 case SpvOpAtomicIIncrement:
2209 case SpvOpAtomicIDecrement:
2210 case SpvOpAtomicIAdd:
2211 case SpvOpAtomicISub:
2212 case SpvOpAtomicSMin:
2213 case SpvOpAtomicUMin:
2214 case SpvOpAtomicSMax:
2215 case SpvOpAtomicUMax:
2216 case SpvOpAtomicAnd:
2217 case SpvOpAtomicOr:
2218 case SpvOpAtomicXor:
2219 ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2220 break;
2221
2222 case SpvOpAtomicStore:
2223 ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2224 break;
2225
2226 default:
2227 unreachable("Invalid SPIR-V atomic");
2228 }
2229
2230 /*
2231 SpvScope scope = w[4];
2232 SpvMemorySemanticsMask semantics = w[5];
2233 */
2234
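/* Two lowering paths: workgroup (shared) atomics become variable-based
 * intrinsics on a deref, while SSBO atomics become block-index +
 * byte-offset intrinsics.
 */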
2235 if (ptr->mode == vtn_variable_mode_workgroup) {
2236 nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
2237 const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
2238 nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
2239 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2240 atomic->variables[0] = nir_deref_var_clone(deref, atomic);
2241
2242 switch (opcode) {
2243 case SpvOpAtomicLoad:
2244 atomic->num_components = glsl_get_vector_elements(deref_type);
2245 break;
2246
2247 case SpvOpAtomicStore:
2248 atomic->num_components = glsl_get_vector_elements(deref_type);
2249 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2250 atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2251 break;
2252
2253 case SpvOpAtomicExchange:
2254 case SpvOpAtomicCompareExchange:
2255 case SpvOpAtomicCompareExchangeWeak:
2256 case SpvOpAtomicIIncrement:
2257 case SpvOpAtomicIDecrement:
2258 case SpvOpAtomicIAdd:
2259 case SpvOpAtomicISub:
2260 case SpvOpAtomicSMin:
2261 case SpvOpAtomicUMin:
2262 case SpvOpAtomicSMax:
2263 case SpvOpAtomicUMax:
2264 case SpvOpAtomicAnd:
2265 case SpvOpAtomicOr:
2266 case SpvOpAtomicXor:
2267 fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
2268 break;
2269
2270 default:
2271 unreachable("Invalid SPIR-V atomic");
2272
2273 }
2274 } else {
2275 assert(ptr->mode == vtn_variable_mode_ssbo);
2276 nir_ssa_def *offset, *index;
2277 offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
2278
2279 nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
2280
2281 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2282
2283 switch (opcode) {
2284 case SpvOpAtomicLoad:
2285 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2286 atomic->src[0] = nir_src_for_ssa(index);
2287 atomic->src[1] = nir_src_for_ssa(offset);
2288 break;
2289
2290 case SpvOpAtomicStore:
2291 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2292 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2293 atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2294 atomic->src[1] = nir_src_for_ssa(index);
2295 atomic->src[2] = nir_src_for_ssa(offset);
2296 break;
2297
2298 case SpvOpAtomicExchange:
2299 case SpvOpAtomicCompareExchange:
2300 case SpvOpAtomicCompareExchangeWeak:
2301 case SpvOpAtomicIIncrement:
2302 case SpvOpAtomicIDecrement:
2303 case SpvOpAtomicIAdd:
2304 case SpvOpAtomicISub:
2305 case SpvOpAtomicSMin:
2306 case SpvOpAtomicUMin:
2307 case SpvOpAtomicSMax:
2308 case SpvOpAtomicUMax:
2309 case SpvOpAtomicAnd:
2310 case SpvOpAtomicOr:
2311 case SpvOpAtomicXor:
2312 atomic->src[0] = nir_src_for_ssa(index);
2313 atomic->src[1] = nir_src_for_ssa(offset);
2314 fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
2315 break;
2316
2317 default:
2318 unreachable("Invalid SPIR-V atomic");
2319 }
2320 }
2321
2322 if (opcode != SpvOpAtomicStore) {
2323 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2324
2325 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
2326 glsl_get_vector_elements(type->type),
2327 glsl_get_bit_size(type->type), NULL);
2328
2329 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2330 val->ssa = rzalloc(b, struct vtn_ssa_value);
2331 val->ssa->def = &atomic->dest.ssa;
2332 val->ssa->type = type->type;
2333 }
2334
2335 nir_builder_instr_insert(&b->nb, &atomic->instr);
2336 }
2337
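/* Builds an empty fmov/vecN ALU instruction with a full write mask;
 * e.g. num_components == 3 yields a vec3 with write_mask 0x7. The
 * caller fills in the sources and inserts the instruction.
 */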
2338 static nir_alu_instr *
2339 create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
2340 {
2341 nir_op op;
2342 switch (num_components) {
2343 case 1: op = nir_op_fmov; break;
2344 case 2: op = nir_op_vec2; break;
2345 case 3: op = nir_op_vec3; break;
2346 case 4: op = nir_op_vec4; break;
2347 default: unreachable("bad vector size");
2348 }
2349
2350 nir_alu_instr *vec = nir_alu_instr_create(shader, op);
2351 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
2352 bit_size, NULL);
2353 vec->dest.write_mask = (1 << num_components) - 1;
2354
2355 return vec;
2356 }
2357
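/* Builds the transpose column-by-column. dest->transposed points back
 * at src, so transposing the result again just returns the original
 * value without emitting any more code.
 */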
2358 struct vtn_ssa_value *
2359 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
2360 {
2361 if (src->transposed)
2362 return src->transposed;
2363
2364 struct vtn_ssa_value *dest =
2365 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
2366
2367 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
2368 nir_alu_instr *vec = create_vec(b->shader,
2369 glsl_get_matrix_columns(src->type),
2370 glsl_get_bit_size(src->type));
2371 if (glsl_type_is_vector_or_scalar(src->type)) {
2372 vec->src[0].src = nir_src_for_ssa(src->def);
2373 vec->src[0].swizzle[0] = i;
2374 } else {
2375 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
2376 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
2377 vec->src[j].swizzle[0] = i;
2378 }
2379 }
2380 nir_builder_instr_insert(&b->nb, &vec->instr);
2381 dest->elems[i]->def = &vec->dest.dest.ssa;
2382 }
2383
2384 dest->transposed = src;
2385
2386 return dest;
2387 }
2388
2389 nir_ssa_def *
2390 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
2391 {
2392 unsigned swiz[4] = { index };
2393 return nir_swizzle(&b->nb, src, swiz, 1, true);
2394 }
2395
2396 nir_ssa_def *
2397 vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
2398 unsigned index)
2399 {
2400 nir_alu_instr *vec = create_vec(b->shader, src->num_components,
2401 src->bit_size);
2402
2403 for (unsigned i = 0; i < src->num_components; i++) {
2404 if (i == index) {
2405 vec->src[i].src = nir_src_for_ssa(insert);
2406 } else {
2407 vec->src[i].src = nir_src_for_ssa(src);
2408 vec->src[i].swizzle[0] = i;
2409 }
2410 }
2411
2412 nir_builder_instr_insert(&b->nb, &vec->instr);
2413
2414 return &vec->dest.dest.ssa;
2415 }
2416
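/* Dynamic indexing lowers to a chain of compare-and-selects:
 * dest = bcsel(index == i, component_i, dest) for each component i,
 * so a vec4 source emits three nir_bcsel instructions.
 */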
2417 nir_ssa_def *
2418 vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2419 nir_ssa_def *index)
2420 {
2421 nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
2422 for (unsigned i = 1; i < src->num_components; i++)
2423 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2424 vtn_vector_extract(b, src, i), dest);
2425
2426 return dest;
2427 }
2428
2429 nir_ssa_def *
2430 vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2431 nir_ssa_def *insert, nir_ssa_def *index)
2432 {
2433 nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
2434 for (unsigned i = 1; i < src->num_components; i++)
2435 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2436 vtn_vector_insert(b, src, insert, i), dest);
2437
2438 return dest;
2439 }
2440
2441 static nir_ssa_def *
2442 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
2443 nir_ssa_def *src0, nir_ssa_def *src1,
2444 const uint32_t *indices)
2445 {
2446 nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);
2447
2448 for (unsigned i = 0; i < num_components; i++) {
2449 uint32_t index = indices[i];
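/* Per the SPIR-V spec, a component literal of 0xFFFFFFFF means the
 * result component has no source and is undefined.
 */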
2450 if (index == 0xffffffff) {
2451 vec->src[i].src =
2452 nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
2453 } else if (index < src0->num_components) {
2454 vec->src[i].src = nir_src_for_ssa(src0);
2455 vec->src[i].swizzle[0] = index;
2456 } else {
2457 vec->src[i].src = nir_src_for_ssa(src1);
2458 vec->src[i].swizzle[0] = index - src0->num_components;
2459 }
2460 }
2461
2462 nir_builder_instr_insert(&b->nb, &vec->instr);
2463
2464 return &vec->dest.dest.ssa;
2465 }
2466
2467 /*
2468 * Concatenates a number of vectors/scalars together to produce a vector
2469 */
2470 static nir_ssa_def *
2471 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
2472 unsigned num_srcs, nir_ssa_def **srcs)
2473 {
2474 nir_alu_instr *vec = create_vec(b->shader, num_components,
2475 srcs[0]->bit_size);
2476
2477 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2478 *
2479 * "When constructing a vector, there must be at least two Constituent
2480 * operands."
2481 */
2482 assert(num_srcs >= 2);
2483
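/* Constituents are consumed left to right, one component at a time;
 * e.g. constructing a vec4 from (%v2, %a, %b) yields
 * (v2.x, v2.y, a, b).
 */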
2484 unsigned dest_idx = 0;
2485 for (unsigned i = 0; i < num_srcs; i++) {
2486 nir_ssa_def *src = srcs[i];
2487 assert(dest_idx + src->num_components <= num_components);
2488 for (unsigned j = 0; j < src->num_components; j++) {
2489 vec->src[dest_idx].src = nir_src_for_ssa(src);
2490 vec->src[dest_idx].swizzle[0] = j;
2491 dest_idx++;
2492 }
2493 }
2494
2495 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2496 *
2497 * "When constructing a vector, the total number of components in all
2498 * the operands must equal the number of components in Result Type."
2499 */
2500 assert(dest_idx == num_components);
2501
2502 nir_builder_instr_insert(&b->nb, &vec->instr);
2503
2504 return &vec->dest.dest.ssa;
2505 }
2506
2507 static struct vtn_ssa_value *
2508 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
2509 {
2510 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
2511 dest->type = src->type;
2512
2513 if (glsl_type_is_vector_or_scalar(src->type)) {
2514 dest->def = src->def;
2515 } else {
2516 unsigned elems = glsl_get_length(src->type);
2517
2518 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
2519 for (unsigned i = 0; i < elems; i++)
2520 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
2521 }
2522
2523 return dest;
2524 }
2525
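/* OpCompositeInsert produces a new composite, so clone the source
 * aggregate first and patch the copy rather than mutating src.
 */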
2526 static struct vtn_ssa_value *
2527 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
2528 struct vtn_ssa_value *insert, const uint32_t *indices,
2529 unsigned num_indices)
2530 {
2531 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
2532
2533 struct vtn_ssa_value *cur = dest;
2534 unsigned i;
2535 for (i = 0; i < num_indices - 1; i++) {
2536 cur = cur->elems[indices[i]];
2537 }
2538
2539 if (glsl_type_is_vector_or_scalar(cur->type)) {
2540 /* According to the SPIR-V spec, OpCompositeInsert may work down to
2541 * the component granularity. In that case, the last index will be
2542 * the vector component at which to insert the scalar.
2543 */
2544
2545 cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
2546 } else {
2547 cur->elems[indices[i]] = insert;
2548 }
2549
2550 return dest;
2551 }
2552
2553 static struct vtn_ssa_value *
2554 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
2555 const uint32_t *indices, unsigned num_indices)
2556 {
2557 struct vtn_ssa_value *cur = src;
2558 for (unsigned i = 0; i < num_indices; i++) {
2559 if (glsl_type_is_vector_or_scalar(cur->type)) {
2560 assert(i == num_indices - 1);
2561 /* According to the SPIR-V spec, OpCompositeExtract may work down to
2562 * the component granularity. The last index will be the index of the
2563 * component to extract from the vector.
2564 */
2565
2566 struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
2567 ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
2568 ret->def = vtn_vector_extract(b, cur->def, indices[i]);
2569 return ret;
2570 } else {
2571 cur = cur->elems[indices[i]];
2572 }
2573 }
2574
2575 return cur;
2576 }
2577
2578 static void
2579 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
2580 const uint32_t *w, unsigned count)
2581 {
2582 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2583 const struct glsl_type *type =
2584 vtn_value(b, w[1], vtn_value_type_type)->type->type;
2585 val->ssa = vtn_create_ssa_value(b, type);
2586
2587 switch (opcode) {
2588 case SpvOpVectorExtractDynamic:
2589 val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
2590 vtn_ssa_value(b, w[4])->def);
2591 break;
2592
2593 case SpvOpVectorInsertDynamic:
2594 val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
2595 vtn_ssa_value(b, w[4])->def,
2596 vtn_ssa_value(b, w[5])->def);
2597 break;
2598
2599 case SpvOpVectorShuffle:
2600 val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
2601 vtn_ssa_value(b, w[3])->def,
2602 vtn_ssa_value(b, w[4])->def,
2603 w + 5);
2604 break;
2605
2606 case SpvOpCompositeConstruct: {
2607 unsigned elems = count - 3;
2608 if (glsl_type_is_vector_or_scalar(type)) {
2609 nir_ssa_def *srcs[4];
2610 for (unsigned i = 0; i < elems; i++)
2611 srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
2612 val->ssa->def =
2613 vtn_vector_construct(b, glsl_get_vector_elements(type),
2614 elems, srcs);
2615 } else {
2616 val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2617 for (unsigned i = 0; i < elems; i++)
2618 val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
2619 }
2620 break;
2621 }
2622 case SpvOpCompositeExtract:
2623 val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
2624 w + 4, count - 4);
2625 break;
2626
2627 case SpvOpCompositeInsert:
2628 val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
2629 vtn_ssa_value(b, w[3]),
2630 w + 5, count - 5);
2631 break;
2632
2633 case SpvOpCopyObject:
2634 val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
2635 break;
2636
2637 default:
2638 unreachable("unknown composite operation");
2639 }
2640 }
2641
2642 static void
2643 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
2644 const uint32_t *w, unsigned count)
2645 {
2646 nir_intrinsic_op intrinsic_op;
2647 switch (opcode) {
2648 case SpvOpEmitVertex:
2649 case SpvOpEmitStreamVertex:
2650 intrinsic_op = nir_intrinsic_emit_vertex;
2651 break;
2652 case SpvOpEndPrimitive:
2653 case SpvOpEndStreamPrimitive:
2654 intrinsic_op = nir_intrinsic_end_primitive;
2655 break;
2656 case SpvOpMemoryBarrier:
2657 intrinsic_op = nir_intrinsic_memory_barrier;
2658 break;
2659 case SpvOpControlBarrier:
2660 intrinsic_op = nir_intrinsic_barrier;
2661 break;
2662 default:
2663 unreachable("unknown barrier instruction");
2664 }
2665
2666 nir_intrinsic_instr *intrin =
2667 nir_intrinsic_instr_create(b->shader, intrinsic_op);
2668
2669 if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
2670 nir_intrinsic_set_stream_id(intrin, w[1]);
2671
2672 nir_builder_instr_insert(&b->nb, &intrin->instr);
2673 }
2674
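/* The return values below are raw GLenum numbers (GL_POINTS == 0,
 * GL_TRIANGLES == 4, ...), presumably spelled out as literals to avoid
 * a GL header dependency here.
 */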
2675 static unsigned
2676 gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
2677 {
2678 switch (mode) {
2679 case SpvExecutionModeInputPoints:
2680 case SpvExecutionModeOutputPoints:
2681 return 0; /* GL_POINTS */
2682 case SpvExecutionModeInputLines:
2683 return 1; /* GL_LINES */
2684 case SpvExecutionModeInputLinesAdjacency:
2685 return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
2686 case SpvExecutionModeTriangles:
2687 return 4; /* GL_TRIANGLES */
2688 case SpvExecutionModeInputTrianglesAdjacency:
2689 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
2690 case SpvExecutionModeQuads:
2691 return 7; /* GL_QUADS */
2692 case SpvExecutionModeIsolines:
2693 return 0x8E7A; /* GL_ISOLINES */
2694 case SpvExecutionModeOutputLineStrip:
2695 return 3; /* GL_LINE_STRIP */
2696 case SpvExecutionModeOutputTriangleStrip:
2697 return 5; /* GL_TRIANGLE_STRIP */
2698 default:
2699 unreachable("Invalid primitive type");
2700 return 4;
2701 }
2702 }
2703
2704 static unsigned
2705 vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
2706 {
2707 switch (mode) {
2708 case SpvExecutionModeInputPoints:
2709 return 1;
2710 case SpvExecutionModeInputLines:
2711 return 2;
2712 case SpvExecutionModeInputLinesAdjacency:
2713 return 4;
2714 case SpvExecutionModeTriangles:
2715 return 3;
2716 case SpvExecutionModeInputTrianglesAdjacency:
2717 return 6;
2718 default:
2719 unreachable("Invalid GS input mode");
2720 return 0;
2721 }
2722 }
2723
2724 static gl_shader_stage
2725 stage_for_execution_model(SpvExecutionModel model)
2726 {
2727 switch (model) {
2728 case SpvExecutionModelVertex:
2729 return MESA_SHADER_VERTEX;
2730 case SpvExecutionModelTessellationControl:
2731 return MESA_SHADER_TESS_CTRL;
2732 case SpvExecutionModelTessellationEvaluation:
2733 return MESA_SHADER_TESS_EVAL;
2734 case SpvExecutionModelGeometry:
2735 return MESA_SHADER_GEOMETRY;
2736 case SpvExecutionModelFragment:
2737 return MESA_SHADER_FRAGMENT;
2738 case SpvExecutionModelGLCompute:
2739 return MESA_SHADER_COMPUTE;
2740 default:
2741 unreachable("Unsupported execution model");
2742 }
2743 }
2744
2745 #define spv_check_supported(name, cap) do { \
2746 if (!(b->options && b->options->caps.name)) \
2747 vtn_warn("Unsupported SPIR-V capability: %s", \
2748 spirv_capability_to_string(cap)); \
2749 } while(0)
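/* Note that spv_check_supported() only warns via vtn_warn(); an
 * unsupported capability does not abort translation.
 */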
2750
2751 static bool
2752 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
2753 const uint32_t *w, unsigned count)
2754 {
2755 switch (opcode) {
2756 case SpvOpSource:
2757 case SpvOpSourceExtension:
2758 case SpvOpSourceContinued:
2759 case SpvOpExtension:
2760 /* Unhandled, but these are for debug so that's ok. */
2761 break;
2762
2763 case SpvOpCapability: {
2764 SpvCapability cap = w[1];
2765 switch (cap) {
2766 case SpvCapabilityMatrix:
2767 case SpvCapabilityShader:
2768 case SpvCapabilityGeometry:
2769 case SpvCapabilityGeometryPointSize:
2770 case SpvCapabilityUniformBufferArrayDynamicIndexing:
2771 case SpvCapabilitySampledImageArrayDynamicIndexing:
2772 case SpvCapabilityStorageBufferArrayDynamicIndexing:
2773 case SpvCapabilityStorageImageArrayDynamicIndexing:
2774 case SpvCapabilityImageRect:
2775 case SpvCapabilitySampledRect:
2776 case SpvCapabilitySampled1D:
2777 case SpvCapabilityImage1D:
2778 case SpvCapabilitySampledCubeArray:
2779 case SpvCapabilityImageCubeArray:
2780 case SpvCapabilitySampledBuffer:
2781 case SpvCapabilityImageBuffer:
2782 case SpvCapabilityImageQuery:
2783 case SpvCapabilityDerivativeControl:
2784 case SpvCapabilityInterpolationFunction:
2785 case SpvCapabilityMultiViewport:
2786 case SpvCapabilitySampleRateShading:
2787 case SpvCapabilityClipDistance:
2788 case SpvCapabilityCullDistance:
2789 case SpvCapabilityInputAttachment:
2790 case SpvCapabilityImageGatherExtended:
2791 case SpvCapabilityStorageImageExtendedFormats:
2792 break;
2793
2794 case SpvCapabilityGeometryStreams:
2795 case SpvCapabilityLinkage:
2796 case SpvCapabilityVector16:
2797 case SpvCapabilityFloat16Buffer:
2798 case SpvCapabilityFloat16:
2799 case SpvCapabilityInt64Atomics:
2800 case SpvCapabilityAtomicStorage:
2801 case SpvCapabilityInt16:
2802 case SpvCapabilityStorageImageMultisample:
2803 case SpvCapabilityInt8:
2804 case SpvCapabilitySparseResidency:
2805 case SpvCapabilityMinLod:
2806 case SpvCapabilityTransformFeedback:
2807 vtn_warn("Unsupported SPIR-V capability: %s",
2808 spirv_capability_to_string(cap));
2809 break;
2810
2811 case SpvCapabilityFloat64:
2812 spv_check_supported(float64, cap);
2813 break;
2814 case SpvCapabilityInt64:
2815 spv_check_supported(int64, cap);
2816 break;
2817
2818 case SpvCapabilityAddresses:
2819 case SpvCapabilityKernel:
2820 case SpvCapabilityImageBasic:
2821 case SpvCapabilityImageReadWrite:
2822 case SpvCapabilityImageMipmap:
2823 case SpvCapabilityPipes:
2824 case SpvCapabilityGroups:
2825 case SpvCapabilityDeviceEnqueue:
2826 case SpvCapabilityLiteralSampler:
2827 case SpvCapabilityGenericPointer:
2828 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
2829 spirv_capability_to_string(cap));
2830 break;
2831
2832 case SpvCapabilityImageMSArray:
2833 spv_check_supported(image_ms_array, cap);
2834 break;
2835
2836 case SpvCapabilityTessellation:
2837 case SpvCapabilityTessellationPointSize:
2838 spv_check_supported(tessellation, cap);
2839 break;
2840
2841 case SpvCapabilityDrawParameters:
2842 spv_check_supported(draw_parameters, cap);
2843 break;
2844
2845 case SpvCapabilityStorageImageReadWithoutFormat:
2846 spv_check_supported(image_read_without_format, cap);
2847 break;
2848
2849 case SpvCapabilityStorageImageWriteWithoutFormat:
2850 spv_check_supported(image_write_without_format, cap);
2851 break;
2852
2853 case SpvCapabilityMultiView:
2854 spv_check_supported(multiview, cap);
2855 break;
2856
2857 case SpvCapabilityVariablePointersStorageBuffer:
2858 case SpvCapabilityVariablePointers:
2859 spv_check_supported(variable_pointers, cap);
2860 break;
2861
2862 default:
2863 unreachable("Unhandled capability");
2864 }
2865 break;
2866 }
2867
2868 case SpvOpExtInstImport:
2869 vtn_handle_extension(b, opcode, w, count);
2870 break;
2871
2872 case SpvOpMemoryModel:
2873 assert(w[1] == SpvAddressingModelLogical);
2874 assert(w[2] == SpvMemoryModelSimple ||
2875 w[2] == SpvMemoryModelGLSL450);
2876 break;
2877
2878 case SpvOpEntryPoint: {
2879 struct vtn_value *entry_point = &b->values[w[2]];
2880 /* Let this be a name label regardless */
2881 unsigned name_words;
2882 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
2883
2884 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
2885 stage_for_execution_model(w[1]) != b->entry_point_stage)
2886 break;
2887
2888 assert(b->entry_point == NULL);
2889 b->entry_point = entry_point;
2890 break;
2891 }
2892
2893 case SpvOpString:
2894 vtn_push_value(b, w[1], vtn_value_type_string)->str =
2895 vtn_string_literal(b, &w[2], count - 2, NULL);
2896 break;
2897
2898 case SpvOpName:
2899 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
2900 break;
2901
2902 case SpvOpMemberName:
2903 /* TODO */
2904 break;
2905
2906 case SpvOpExecutionMode:
2907 case SpvOpDecorationGroup:
2908 case SpvOpDecorate:
2909 case SpvOpMemberDecorate:
2910 case SpvOpGroupDecorate:
2911 case SpvOpGroupMemberDecorate:
2912 vtn_handle_decoration(b, opcode, w, count);
2913 break;
2914
2915 default:
2916 return false; /* End of preamble */
2917 }
2918
2919 return true;
2920 }
2921
2922 static void
2923 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
2924 const struct vtn_decoration *mode, void *data)
2925 {
2926 assert(b->entry_point == entry_point);
2927
2928 switch (mode->exec_mode) {
2929 case SpvExecutionModeOriginUpperLeft:
2930 case SpvExecutionModeOriginLowerLeft:
2931 b->origin_upper_left =
2932 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
2933 break;
2934
2935 case SpvExecutionModeEarlyFragmentTests:
2936 assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
2937 b->shader->info.fs.early_fragment_tests = true;
2938 break;
2939
2940 case SpvExecutionModeInvocations:
2941 assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
2942 b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
2943 break;
2944
2945 case SpvExecutionModeDepthReplacing:
2946 assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
2947 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
2948 break;
2949 case SpvExecutionModeDepthGreater:
2950 assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
2951 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
2952 break;
2953 case SpvExecutionModeDepthLess:
2954 assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
2955 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
2956 break;
2957 case SpvExecutionModeDepthUnchanged:
2958 assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
2959 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
2960 break;
2961
2962 case SpvExecutionModeLocalSize:
2963 assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
2964 b->shader->info.cs.local_size[0] = mode->literals[0];
2965 b->shader->info.cs.local_size[1] = mode->literals[1];
2966 b->shader->info.cs.local_size[2] = mode->literals[2];
2967 break;
2968 case SpvExecutionModeLocalSizeHint:
2969 break; /* Nothing to do with this */
2970
2971 case SpvExecutionModeOutputVertices:
2972 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
2973 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
2974 b->shader->info.tess.tcs_vertices_out = mode->literals[0];
2975 } else {
2976 assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
2977 b->shader->info.gs.vertices_out = mode->literals[0];
2978 }
2979 break;
2980
2981 case SpvExecutionModeInputPoints:
2982 case SpvExecutionModeInputLines:
2983 case SpvExecutionModeInputLinesAdjacency:
2984 case SpvExecutionModeTriangles:
2985 case SpvExecutionModeInputTrianglesAdjacency:
2986 case SpvExecutionModeQuads:
2987 case SpvExecutionModeIsolines:
2988 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
2989 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
2990 b->shader->info.tess.primitive_mode =
2991 gl_primitive_from_spv_execution_mode(mode->exec_mode);
2992 } else {
2993 assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
2994 b->shader->info.gs.vertices_in =
2995 vertices_in_from_spv_execution_mode(mode->exec_mode);
2996 }
2997 break;
2998
2999 case SpvExecutionModeOutputPoints:
3000 case SpvExecutionModeOutputLineStrip:
3001 case SpvExecutionModeOutputTriangleStrip:
3002 assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3003 b->shader->info.gs.output_primitive =
3004 gl_primitive_from_spv_execution_mode(mode->exec_mode);
3005 break;
3006
3007 case SpvExecutionModeSpacingEqual:
3008 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3009 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3010 b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
3011 break;
3012 case SpvExecutionModeSpacingFractionalEven:
3013 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3014 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3015 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
3016 break;
3017 case SpvExecutionModeSpacingFractionalOdd:
3018 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3019 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3020 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
3021 break;
3022 case SpvExecutionModeVertexOrderCw:
3023 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3024 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3025 b->shader->info.tess.ccw = false;
3026 break;
3027 case SpvExecutionModeVertexOrderCcw:
3028 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3029 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3030 b->shader->info.tess.ccw = true;
3031 break;
3032 case SpvExecutionModePointMode:
3033 assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3034 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3035 b->shader->info.tess.point_mode = true;
3036 break;
3037
3038 case SpvExecutionModePixelCenterInteger:
3039 b->pixel_center_integer = true;
3040 break;
3041
3042 case SpvExecutionModeXfb:
3043 unreachable("Unhandled execution mode");
3044 break;
3045
3046 case SpvExecutionModeVecTypeHint:
3047 case SpvExecutionModeContractionOff:
3048 break; /* OpenCL */
3049
3050 default:
3051 unreachable("Unhandled execution mode");
3052 }
3053 }
3054
3055 static bool
3056 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
3057 const uint32_t *w, unsigned count)
3058 {
3059 switch (opcode) {
3060 case SpvOpSource:
3061 case SpvOpSourceContinued:
3062 case SpvOpSourceExtension:
3063 case SpvOpExtension:
3064 case SpvOpCapability:
3065 case SpvOpExtInstImport:
3066 case SpvOpMemoryModel:
3067 case SpvOpEntryPoint:
3068 case SpvOpExecutionMode:
3069 case SpvOpString:
3070 case SpvOpName:
3071 case SpvOpMemberName:
3072 case SpvOpDecorationGroup:
3073 case SpvOpDecorate:
3074 case SpvOpMemberDecorate:
3075 case SpvOpGroupDecorate:
3076 case SpvOpGroupMemberDecorate:
3077 unreachable("Invalid opcode in types and variables section");
3078 break;
3079
3080 case SpvOpTypeVoid:
3081 case SpvOpTypeBool:
3082 case SpvOpTypeInt:
3083 case SpvOpTypeFloat:
3084 case SpvOpTypeVector:
3085 case SpvOpTypeMatrix:
3086 case SpvOpTypeImage:
3087 case SpvOpTypeSampler:
3088 case SpvOpTypeSampledImage:
3089 case SpvOpTypeArray:
3090 case SpvOpTypeRuntimeArray:
3091 case SpvOpTypeStruct:
3092 case SpvOpTypeOpaque:
3093 case SpvOpTypePointer:
3094 case SpvOpTypeFunction:
3095 case SpvOpTypeEvent:
3096 case SpvOpTypeDeviceEvent:
3097 case SpvOpTypeReserveId:
3098 case SpvOpTypeQueue:
3099 case SpvOpTypePipe:
3100 vtn_handle_type(b, opcode, w, count);
3101 break;
3102
3103 case SpvOpConstantTrue:
3104 case SpvOpConstantFalse:
3105 case SpvOpConstant:
3106 case SpvOpConstantComposite:
3107 case SpvOpConstantSampler:
3108 case SpvOpConstantNull:
3109 case SpvOpSpecConstantTrue:
3110 case SpvOpSpecConstantFalse:
3111 case SpvOpSpecConstant:
3112 case SpvOpSpecConstantComposite:
3113 case SpvOpSpecConstantOp:
3114 vtn_handle_constant(b, opcode, w, count);
3115 break;
3116
3117 case SpvOpUndef:
3118 case SpvOpVariable:
3119 vtn_handle_variables(b, opcode, w, count);
3120 break;
3121
3122 default:
3123 return false; /* End of preamble */
3124 }
3125
3126 return true;
3127 }
3128
3129 static bool
3130 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
3131 const uint32_t *w, unsigned count)
3132 {
3133 switch (opcode) {
3134 case SpvOpLabel:
3135 break;
3136
3137 case SpvOpLoopMerge:
3138 case SpvOpSelectionMerge:
3139 /* This is handled by cfg pre-pass and walk_blocks */
3140 break;
3141
3142 case SpvOpUndef: {
3143 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
3144 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
3145 break;
3146 }
3147
3148 case SpvOpExtInst:
3149 vtn_handle_extension(b, opcode, w, count);
3150 break;
3151
3152 case SpvOpVariable:
3153 case SpvOpLoad:
3154 case SpvOpStore:
3155 case SpvOpCopyMemory:
3156 case SpvOpCopyMemorySized:
3157 case SpvOpAccessChain:
3158 case SpvOpPtrAccessChain:
3159 case SpvOpInBoundsAccessChain:
3160 case SpvOpArrayLength:
3161 vtn_handle_variables(b, opcode, w, count);
3162 break;
3163
3164 case SpvOpFunctionCall:
3165 vtn_handle_function_call(b, opcode, w, count);
3166 break;
3167
3168 case SpvOpSampledImage:
3169 case SpvOpImage:
3170 case SpvOpImageSampleImplicitLod:
3171 case SpvOpImageSampleExplicitLod:
3172 case SpvOpImageSampleDrefImplicitLod:
3173 case SpvOpImageSampleDrefExplicitLod:
3174 case SpvOpImageSampleProjImplicitLod:
3175 case SpvOpImageSampleProjExplicitLod:
3176 case SpvOpImageSampleProjDrefImplicitLod:
3177 case SpvOpImageSampleProjDrefExplicitLod:
3178 case SpvOpImageFetch:
3179 case SpvOpImageGather:
3180 case SpvOpImageDrefGather:
3181 case SpvOpImageQuerySizeLod:
3182 case SpvOpImageQueryLod:
3183 case SpvOpImageQueryLevels:
3184 case SpvOpImageQuerySamples:
3185 vtn_handle_texture(b, opcode, w, count);
3186 break;
3187
3188 case SpvOpImageRead:
3189 case SpvOpImageWrite:
3190 case SpvOpImageTexelPointer:
3191 vtn_handle_image(b, opcode, w, count);
3192 break;
3193
3194 case SpvOpImageQuerySize: {
3195 struct vtn_pointer *image =
3196 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3197 if (image->mode == vtn_variable_mode_image) {
3198 vtn_handle_image(b, opcode, w, count);
3199 } else {
3200 assert(image->mode == vtn_variable_mode_sampler);
3201 vtn_handle_texture(b, opcode, w, count);
3202 }
3203 break;
3204 }
3205
3206 case SpvOpAtomicLoad:
3207 case SpvOpAtomicExchange:
3208 case SpvOpAtomicCompareExchange:
3209 case SpvOpAtomicCompareExchangeWeak:
3210 case SpvOpAtomicIIncrement:
3211 case SpvOpAtomicIDecrement:
3212 case SpvOpAtomicIAdd:
3213 case SpvOpAtomicISub:
3214 case SpvOpAtomicSMin:
3215 case SpvOpAtomicUMin:
3216 case SpvOpAtomicSMax:
3217 case SpvOpAtomicUMax:
3218 case SpvOpAtomicAnd:
3219 case SpvOpAtomicOr:
3220 case SpvOpAtomicXor: {
3221 struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
3222 if (pointer->value_type == vtn_value_type_image_pointer) {
3223 vtn_handle_image(b, opcode, w, count);
3224 } else {
3225 assert(pointer->value_type == vtn_value_type_pointer);
3226 vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
3227 }
3228 break;
3229 }
3230
3231 case SpvOpAtomicStore: {
3232 struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
3233 if (pointer->value_type == vtn_value_type_image_pointer) {
3234 vtn_handle_image(b, opcode, w, count);
3235 } else {
3236 assert(pointer->value_type == vtn_value_type_pointer);
3237 vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
3238 }
3239 break;
3240 }
3241
3242 case SpvOpSelect: {
3243 /* Handle OpSelect up-front here because it needs to be able to handle
3244 * pointers and not just regular vectors and scalars.
3245 */
3246 struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
3247 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
3248 ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
3249 vtn_ssa_value(b, w[4])->def,
3250 vtn_ssa_value(b, w[5])->def);
3251 vtn_push_ssa(b, w[2], res_type, ssa);
3252 break;
3253 }
3254
3255 case SpvOpSNegate:
3256 case SpvOpFNegate:
3257 case SpvOpNot:
3258 case SpvOpAny:
3259 case SpvOpAll:
3260 case SpvOpConvertFToU:
3261 case SpvOpConvertFToS:
3262 case SpvOpConvertSToF:
3263 case SpvOpConvertUToF:
3264 case SpvOpUConvert:
3265 case SpvOpSConvert:
3266 case SpvOpFConvert:
3267 case SpvOpQuantizeToF16:
3268 case SpvOpConvertPtrToU:
3269 case SpvOpConvertUToPtr:
3270 case SpvOpPtrCastToGeneric:
3271 case SpvOpGenericCastToPtr:
3272 case SpvOpBitcast:
3273 case SpvOpIsNan:
3274 case SpvOpIsInf:
3275 case SpvOpIsFinite:
3276 case SpvOpIsNormal:
3277 case SpvOpSignBitSet:
3278 case SpvOpLessOrGreater:
3279 case SpvOpOrdered:
3280 case SpvOpUnordered:
3281 case SpvOpIAdd:
3282 case SpvOpFAdd:
3283 case SpvOpISub:
3284 case SpvOpFSub:
3285 case SpvOpIMul:
3286 case SpvOpFMul:
3287 case SpvOpUDiv:
3288 case SpvOpSDiv:
3289 case SpvOpFDiv:
3290 case SpvOpUMod:
3291 case SpvOpSRem:
3292 case SpvOpSMod:
3293 case SpvOpFRem:
3294 case SpvOpFMod:
3295 case SpvOpVectorTimesScalar:
3296 case SpvOpDot:
3297 case SpvOpIAddCarry:
3298 case SpvOpISubBorrow:
3299 case SpvOpUMulExtended:
3300 case SpvOpSMulExtended:
3301 case SpvOpShiftRightLogical:
3302 case SpvOpShiftRightArithmetic:
3303 case SpvOpShiftLeftLogical:
3304 case SpvOpLogicalEqual:
3305 case SpvOpLogicalNotEqual:
3306 case SpvOpLogicalOr:
3307 case SpvOpLogicalAnd:
3308 case SpvOpLogicalNot:
3309 case SpvOpBitwiseOr:
3310 case SpvOpBitwiseXor:
3311 case SpvOpBitwiseAnd:
3312 case SpvOpIEqual:
3313 case SpvOpFOrdEqual:
3314 case SpvOpFUnordEqual:
3315 case SpvOpINotEqual:
3316 case SpvOpFOrdNotEqual:
3317 case SpvOpFUnordNotEqual:
3318 case SpvOpULessThan:
3319 case SpvOpSLessThan:
3320 case SpvOpFOrdLessThan:
3321 case SpvOpFUnordLessThan:
3322 case SpvOpUGreaterThan:
3323 case SpvOpSGreaterThan:
3324 case SpvOpFOrdGreaterThan:
3325 case SpvOpFUnordGreaterThan:
3326 case SpvOpULessThanEqual:
3327 case SpvOpSLessThanEqual:
3328 case SpvOpFOrdLessThanEqual:
3329 case SpvOpFUnordLessThanEqual:
3330 case SpvOpUGreaterThanEqual:
3331 case SpvOpSGreaterThanEqual:
3332 case SpvOpFOrdGreaterThanEqual:
3333 case SpvOpFUnordGreaterThanEqual:
3334 case SpvOpDPdx:
3335 case SpvOpDPdy:
3336 case SpvOpFwidth:
3337 case SpvOpDPdxFine:
3338 case SpvOpDPdyFine:
3339 case SpvOpFwidthFine:
3340 case SpvOpDPdxCoarse:
3341 case SpvOpDPdyCoarse:
3342 case SpvOpFwidthCoarse:
3343 case SpvOpBitFieldInsert:
3344 case SpvOpBitFieldSExtract:
3345 case SpvOpBitFieldUExtract:
3346 case SpvOpBitReverse:
3347 case SpvOpBitCount:
3348 case SpvOpTranspose:
3349 case SpvOpOuterProduct:
3350 case SpvOpMatrixTimesScalar:
3351 case SpvOpVectorTimesMatrix:
3352 case SpvOpMatrixTimesVector:
3353 case SpvOpMatrixTimesMatrix:
3354 vtn_handle_alu(b, opcode, w, count);
3355 break;
3356
3357 case SpvOpVectorExtractDynamic:
3358 case SpvOpVectorInsertDynamic:
3359 case SpvOpVectorShuffle:
3360 case SpvOpCompositeConstruct:
3361 case SpvOpCompositeExtract:
3362 case SpvOpCompositeInsert:
3363 case SpvOpCopyObject:
3364 vtn_handle_composite(b, opcode, w, count);
3365 break;
3366
3367 case SpvOpEmitVertex:
3368 case SpvOpEndPrimitive:
3369 case SpvOpEmitStreamVertex:
3370 case SpvOpEndStreamPrimitive:
3371 case SpvOpControlBarrier:
3372 case SpvOpMemoryBarrier:
3373 vtn_handle_barrier(b, opcode, w, count);
3374 break;
3375
3376 default:
3377 unreachable("Unhandled opcode");
3378 }
3379
3380 return true;
3381 }
3382
3383 nir_function *
3384 spirv_to_nir(const uint32_t *words, size_t word_count,
3385 struct nir_spirv_specialization *spec, unsigned num_spec,
3386 gl_shader_stage stage, const char *entry_point_name,
3387 const struct spirv_to_nir_options *options,
3388 const nir_shader_compiler_options *nir_options)
3389 {
3390 /* Initialize the vtn_builder object */
3391 struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
3392 b->spirv = words;
3393 b->file = NULL;
3394 b->line = -1;
3395 b->col = -1;
3396 exec_list_make_empty(&b->functions);
3397 b->entry_point_stage = stage;
3398 b->entry_point_name = entry_point_name;
3399 b->options = options;
3400
3401 const uint32_t *word_end = words + word_count;
3402
3403 /* Handle the SPIR-V header (first 5 dwords) */
3404 assert(word_count > 5);
3405
3406 assert(words[0] == SpvMagicNumber);
3407 assert(words[1] >= 0x10000);
3408 /* words[2] == generator magic */
3409 unsigned value_id_bound = words[3];
3410 assert(words[4] == 0);
3411
3412 words += 5;
3413
3414 b->value_id_bound = value_id_bound;
3415 b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
3416
3417 /* Handle all the preamble instructions */
3418 words = vtn_foreach_instruction(b, words, word_end,
3419 vtn_handle_preamble_instruction);
3420
3421 if (b->entry_point == NULL) {
3422 assert(!"Entry point not found");
3423 ralloc_free(b);
3424 return NULL;
3425 }
3426
3427 b->shader = nir_shader_create(b, stage, nir_options, NULL);
3428
3429 /* Set shader info defaults */
3430 b->shader->info.gs.invocations = 1;
3431
3432 /* Parse execution modes */
3433 vtn_foreach_execution_mode(b, b->entry_point,
3434 vtn_handle_execution_mode, NULL);
3435
3436 b->specializations = spec;
3437 b->num_specializations = num_spec;
3438
3439 /* Handle all variable, type, and constant instructions */
3440 words = vtn_foreach_instruction(b, words, word_end,
3441 vtn_handle_variable_or_type_instruction);
3442
3443 vtn_build_cfg(b, words, word_end);
3444
3445 assert(b->entry_point->value_type == vtn_value_type_function);
3446 b->entry_point->func->referenced = true;
3447
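/* Functions are emitted lazily: only those reachable from the entry
 * point. Emitting a function can mark its callees as referenced, so
 * iterate until we reach a fixed point.
 */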
3448 bool progress;
3449 do {
3450 progress = false;
3451 foreach_list_typed(struct vtn_function, func, node, &b->functions) {
3452 if (func->referenced && !func->emitted) {
3453 b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
3454 _mesa_key_pointer_equal);
3455
3456 vtn_function_emit(b, func, vtn_handle_body_instruction);
3457 progress = true;
3458 }
3459 }
3460 } while (progress);
3461
3462 assert(b->entry_point->value_type == vtn_value_type_function);
3463 nir_function *entry_point = b->entry_point->func->impl->function;
3464 assert(entry_point);
3465
3466 /* Unparent the shader from the vtn_builder before we delete the builder */
3467 ralloc_steal(NULL, b->shader);
3468
3469 ralloc_free(b);
3470
3471 return entry_point;
3472 }