spirv: allow specialization constants with bit sizes other than 32 bits
mesa.git: src/compiler/spirv/spirv_to_nir.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jason Ekstrand (jason@jlekstrand.net)
25 *
26 */
27
28 #include "vtn_private.h"
29 #include "nir/nir_vla.h"
30 #include "nir/nir_control_flow.h"
31 #include "nir/nir_constant_expressions.h"
32 #include "spirv_info.h"
33
34 void
35 vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
36 size_t spirv_offset, const char *message)
37 {
38 if (b->options->debug.func) {
39 b->options->debug.func(b->options->debug.private_data,
40 level, spirv_offset, message);
41 }
42
43 #ifndef NDEBUG
44 if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
45 fprintf(stderr, "%s\n", message);
46 #endif
47 }
48
49 void
50 vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
51 size_t spirv_offset, const char *fmt, ...)
52 {
53 va_list args;
54 char *msg;
55
56 va_start(args, fmt);
57 msg = ralloc_vasprintf(NULL, fmt, args);
58 va_end(args);
59
60 vtn_log(b, level, spirv_offset, msg);
61
62 ralloc_free(msg);
63 }
64
65 static void
66 vtn_log_err(struct vtn_builder *b,
67 enum nir_spirv_debug_level level, const char *prefix,
68 const char *file, unsigned line,
69 const char *fmt, va_list args)
70 {
71 char *msg;
72
73 msg = ralloc_strdup(NULL, prefix);
74
75 #ifndef NDEBUG
76 ralloc_asprintf_append(&msg, " In file %s:%u\n", file, line);
77 #endif
78
79 ralloc_asprintf_append(&msg, " ");
80
81 ralloc_vasprintf_append(&msg, fmt, args);
82
83 ralloc_asprintf_append(&msg, "\n %zu bytes into the SPIR-V binary",
84 b->spirv_offset);
85
86 if (b->file) {
87 ralloc_asprintf_append(&msg,
88 "\n in SPIR-V source file %s, line %d, col %d",
89 b->file, b->line, b->col);
90 }
91
92 vtn_log(b, level, b->spirv_offset, msg);
93
94 ralloc_free(msg);
95 }
96
97 void
98 _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
99 const char *fmt, ...)
100 {
101 va_list args;
102
103 va_start(args, fmt);
104 vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
105 file, line, fmt, args);
106 va_end(args);
107 }
108
109 void
110 _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
111 const char *fmt, ...)
112 {
113 va_list args;
114
115 va_start(args, fmt);
116 vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
117 file, line, fmt, args);
118 va_end(args);
119
120 longjmp(b->fail_jump, 1);
121 }
122
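/* Scratch space used while resolving a SpecId decoration.  64-bit
 * specialization data lives in data64; everything narrower (32-bit and
 * 16-bit values, each of which arrives in a full 32-bit SPIR-V word)
 * travels through data32.
 */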
123 struct spec_constant_value {
124 bool is_double;
125 union {
126 uint32_t data32;
127 uint64_t data64;
128 };
129 };
130
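/* Builds a vtn_ssa_value of the given type whose leaves are all fresh
 * nir_ssa_undef defs, recursing through matrices, arrays, and structs.
 */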
131 static struct vtn_ssa_value *
132 vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
133 {
134 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
135 val->type = type;
136
137 if (glsl_type_is_vector_or_scalar(type)) {
138 unsigned num_components = glsl_get_vector_elements(val->type);
139 unsigned bit_size = glsl_get_bit_size(val->type);
140 val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
141 } else {
142 unsigned elems = glsl_get_length(val->type);
143 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
144 if (glsl_type_is_matrix(type)) {
145 const struct glsl_type *elem_type =
146 glsl_vector_type(glsl_get_base_type(type),
147 glsl_get_vector_elements(type));
148
149 for (unsigned i = 0; i < elems; i++)
150 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
151 } else if (glsl_type_is_array(type)) {
152 const struct glsl_type *elem_type = glsl_get_array_element(type);
153 for (unsigned i = 0; i < elems; i++)
154 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
155 } else {
156 for (unsigned i = 0; i < elems; i++) {
157 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
158 val->elems[i] = vtn_undef_ssa_value(b, elem_type);
159 }
160 }
161 }
162
163 return val;
164 }
165
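/* Materializes a nir_constant as a vtn_ssa_value backed by load_const
 * instructions inserted at the top of the current function.  The
 * const_table lookup lets a value that was already materialized be
 * reused instead of rebuilt.
 */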
166 static struct vtn_ssa_value *
167 vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
168 const struct glsl_type *type)
169 {
170 struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
171
172 if (entry)
173 return entry->data;
174
175 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
176 val->type = type;
177
178 switch (glsl_get_base_type(type)) {
179 case GLSL_TYPE_INT:
180 case GLSL_TYPE_UINT:
181 case GLSL_TYPE_INT16:
182 case GLSL_TYPE_UINT16:
183 case GLSL_TYPE_INT64:
184 case GLSL_TYPE_UINT64:
185 case GLSL_TYPE_BOOL:
186 case GLSL_TYPE_FLOAT:
187 case GLSL_TYPE_FLOAT16:
188 case GLSL_TYPE_DOUBLE: {
189 int bit_size = glsl_get_bit_size(type);
190 if (glsl_type_is_vector_or_scalar(type)) {
191 unsigned num_components = glsl_get_vector_elements(val->type);
192 nir_load_const_instr *load =
193 nir_load_const_instr_create(b->shader, num_components, bit_size);
194
195 load->value = constant->values[0];
196
197 nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
198 val->def = &load->def;
199 } else {
200 assert(glsl_type_is_matrix(type));
201 unsigned rows = glsl_get_vector_elements(val->type);
202 unsigned columns = glsl_get_matrix_columns(val->type);
203 val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
204
205 for (unsigned i = 0; i < columns; i++) {
206 struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
207 col_val->type = glsl_get_column_type(val->type);
208 nir_load_const_instr *load =
209 nir_load_const_instr_create(b->shader, rows, bit_size);
210
211 load->value = constant->values[i];
212
213 nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
214 col_val->def = &load->def;
215
216 val->elems[i] = col_val;
217 }
218 }
219 break;
220 }
221
222 case GLSL_TYPE_ARRAY: {
223 unsigned elems = glsl_get_length(val->type);
224 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
225 const struct glsl_type *elem_type = glsl_get_array_element(val->type);
226 for (unsigned i = 0; i < elems; i++)
227 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
228 elem_type);
229 break;
230 }
231
232 case GLSL_TYPE_STRUCT: {
233 unsigned elems = glsl_get_length(val->type);
234 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
235 for (unsigned i = 0; i < elems; i++) {
236 const struct glsl_type *elem_type =
237 glsl_get_struct_field(val->type, i);
238 val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
239 elem_type);
240 }
241 break;
242 }
243
244 default:
245 vtn_fail("bad constant type");
246 }
247
248 return val;
249 }
250
251 struct vtn_ssa_value *
252 vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
253 {
254 struct vtn_value *val = vtn_untyped_value(b, value_id);
255 switch (val->value_type) {
256 case vtn_value_type_undef:
257 return vtn_undef_ssa_value(b, val->type->type);
258
259 case vtn_value_type_constant:
260 return vtn_const_ssa_value(b, val->constant, val->const_type);
261
262 case vtn_value_type_ssa:
263 return val->ssa;
264
265 case vtn_value_type_pointer:
266 vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
267 struct vtn_ssa_value *ssa =
268 vtn_create_ssa_value(b, val->pointer->ptr_type->type);
269 ssa->def = vtn_pointer_to_ssa(b, val->pointer);
270 return ssa;
271
272 default:
273 vtn_fail("Invalid type for an SSA value");
274 }
275 }
276
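/* Copies a nul-terminated string literal out of the SPIR-V word stream.
 * If words_used is non-NULL it receives the number of words the string
 * occupied, rounded up and including the terminating nul.
 */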
277 static char *
278 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
279 unsigned word_count, unsigned *words_used)
280 {
281 char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
282 if (words_used) {
283 /* Amount of space taken by the string (including the null) */
284 unsigned len = strlen(dup) + 1;
285 *words_used = DIV_ROUND_UP(len, sizeof(*words));
286 }
287 return dup;
288 }
289
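/* Walks the word stream from start to end, dispatching each instruction to
 * the given handler.  The first word of every instruction packs the opcode
 * in its low 16 bits and the word count in its high 16 bits.  OpLine and
 * OpNoLine are consumed here so that b->file/line/col and b->spirv_offset
 * stay current for error reporting.  If the handler returns false,
 * iteration stops and the current position is returned.
 */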
290 const uint32_t *
291 vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
292 const uint32_t *end, vtn_instruction_handler handler)
293 {
294 b->file = NULL;
295 b->line = -1;
296 b->col = -1;
297
298 const uint32_t *w = start;
299 while (w < end) {
300 SpvOp opcode = w[0] & SpvOpCodeMask;
301 unsigned count = w[0] >> SpvWordCountShift;
302 vtn_assert(count >= 1 && w + count <= end);
303
304 b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;
305
306 switch (opcode) {
307 case SpvOpNop:
308 break; /* Do nothing */
309
310 case SpvOpLine:
311 b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
312 b->line = w[2];
313 b->col = w[3];
314 break;
315
316 case SpvOpNoLine:
317 b->file = NULL;
318 b->line = -1;
319 b->col = -1;
320 break;
321
322 default:
323 if (!handler(b, opcode, w, count))
324 return w;
325 break;
326 }
327
328 w += count;
329 }
330
331 b->spirv_offset = 0;
332 b->file = NULL;
333 b->line = -1;
334 b->col = -1;
335
336 assert(w == end);
337 return w;
338 }
339
340 static void
341 vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
342 const uint32_t *w, unsigned count)
343 {
344 switch (opcode) {
345 case SpvOpExtInstImport: {
346 struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
347 if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
348 val->ext_handler = vtn_handle_glsl450_instruction;
349 } else {
350 vtn_fail("Unsupported extension");
351 }
352 break;
353 }
354
355 case SpvOpExtInst: {
356 struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
357 bool handled = val->ext_handler(b, w[4], w, count);
358 vtn_assert(handled);
359 break;
360 }
361
362 default:
363 vtn_fail("Unhandled opcode");
364 }
365 }
366
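/* Recursive worker for vtn_foreach_decoration().  base_value is the value
 * the iteration started on, so callbacks always see the originally
 * decorated value even when we recurse into a decoration group.
 */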
367 static void
368 _foreach_decoration_helper(struct vtn_builder *b,
369 struct vtn_value *base_value,
370 int parent_member,
371 struct vtn_value *value,
372 vtn_decoration_foreach_cb cb, void *data)
373 {
374 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
375 int member;
376 if (dec->scope == VTN_DEC_DECORATION) {
377 member = parent_member;
378 } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
379 vtn_assert(parent_member == -1);
380 member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
381 } else {
382 /* Not a decoration */
383 continue;
384 }
385
386 if (dec->group) {
387 vtn_assert(dec->group->value_type == vtn_value_type_decoration_group);
388 _foreach_decoration_helper(b, base_value, member, dec->group,
389 cb, data);
390 } else {
391 cb(b, base_value, member, dec, data);
392 }
393 }
394 }
395
396 /** Iterates (recursively if needed) over all of the decorations on a value
397 *
398 * This function iterates over all of the decorations applied to a given
399 * value. If it encounters a decoration group, it recurses into the group
400 * and iterates over all of those decorations as well.
401 */
402 void
403 vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
404 vtn_decoration_foreach_cb cb, void *data)
405 {
406 _foreach_decoration_helper(b, value, -1, value, cb, data);
407 }
408
409 void
410 vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
411 vtn_execution_mode_foreach_cb cb, void *data)
412 {
413 for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
414 if (dec->scope != VTN_DEC_EXECUTION_MODE)
415 continue;
416
417 vtn_assert(dec->group == NULL);
418 cb(b, value, dec, data);
419 }
420 }
421
422 static void
423 vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
424 const uint32_t *w, unsigned count)
425 {
426 const uint32_t *w_end = w + count;
427 const uint32_t target = w[1];
428 w += 2;
429
430 switch (opcode) {
431 case SpvOpDecorationGroup:
432 vtn_push_value(b, target, vtn_value_type_decoration_group);
433 break;
434
435 case SpvOpDecorate:
436 case SpvOpMemberDecorate:
437 case SpvOpExecutionMode: {
438 struct vtn_value *val = &b->values[target];
439
440 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
441 switch (opcode) {
442 case SpvOpDecorate:
443 dec->scope = VTN_DEC_DECORATION;
444 break;
445 case SpvOpMemberDecorate:
446 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
447 break;
448 case SpvOpExecutionMode:
449 dec->scope = VTN_DEC_EXECUTION_MODE;
450 break;
451 default:
452 vtn_fail("Invalid decoration opcode");
453 }
454 dec->decoration = *(w++);
455 dec->literals = w;
456
457 /* Link into the list */
458 dec->next = val->decoration;
459 val->decoration = dec;
460 break;
461 }
462
463 case SpvOpGroupMemberDecorate:
464 case SpvOpGroupDecorate: {
465 struct vtn_value *group =
466 vtn_value(b, target, vtn_value_type_decoration_group);
467
468 for (; w < w_end; w++) {
469 struct vtn_value *val = vtn_untyped_value(b, *w);
470 struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
471
472 dec->group = group;
473 if (opcode == SpvOpGroupDecorate) {
474 dec->scope = VTN_DEC_DECORATION;
475 } else {
476 dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
477 }
478
479 /* Link into the list */
480 dec->next = val->decoration;
481 val->decoration = dec;
482 }
483 break;
484 }
485
486 default:
487 vtn_fail("Unhandled opcode");
488 }
489 }
490
491 struct member_decoration_ctx {
492 unsigned num_fields;
493 struct glsl_struct_field *fields;
494 struct vtn_type *type;
495 };
496
497 /* does a shallow copy of a vtn_type */
498
499 static struct vtn_type *
500 vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
501 {
502 struct vtn_type *dest = ralloc(b, struct vtn_type);
503 *dest = *src;
504
505 switch (src->base_type) {
506 case vtn_base_type_void:
507 case vtn_base_type_scalar:
508 case vtn_base_type_vector:
509 case vtn_base_type_matrix:
510 case vtn_base_type_array:
511 case vtn_base_type_pointer:
512 case vtn_base_type_image:
513 case vtn_base_type_sampler:
514 /* Nothing more to do */
515 break;
516
517 case vtn_base_type_struct:
518 dest->members = ralloc_array(b, struct vtn_type *, src->length);
519 memcpy(dest->members, src->members,
520 src->length * sizeof(src->members[0]));
521
522 dest->offsets = ralloc_array(b, unsigned, src->length);
523 memcpy(dest->offsets, src->offsets,
524 src->length * sizeof(src->offsets[0]));
525 break;
526
527 case vtn_base_type_function:
528 dest->params = ralloc_array(b, struct vtn_type *, src->length);
529 memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
530 break;
531 }
532
533 return dest;
534 }
535
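/* Struct members share vtn_type objects, so before applying per-member
 * matrix decorations (RowMajor, MatrixStride) we copy every level down to
 * the matrix itself and return the now-mutable matrix type.
 */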
536 static struct vtn_type *
537 mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
538 {
539 type->members[member] = vtn_type_copy(b, type->members[member]);
540 type = type->members[member];
541
542 /* We may have an array of matrices.... Oh, joy! */
543 while (glsl_type_is_array(type->type)) {
544 type->array_element = vtn_type_copy(b, type->array_element);
545 type = type->array_element;
546 }
547
548 vtn_assert(glsl_type_is_matrix(type->type));
549
550 return type;
551 }
552
553 static void
554 struct_member_decoration_cb(struct vtn_builder *b,
555 struct vtn_value *val, int member,
556 const struct vtn_decoration *dec, void *void_ctx)
557 {
558 struct member_decoration_ctx *ctx = void_ctx;
559
560 if (member < 0)
561 return;
562
563 vtn_assert(member < ctx->num_fields);
564
565 switch (dec->decoration) {
566 case SpvDecorationNonWritable:
567 case SpvDecorationNonReadable:
568 case SpvDecorationRelaxedPrecision:
569 case SpvDecorationVolatile:
570 case SpvDecorationCoherent:
571 case SpvDecorationUniform:
572 break; /* FIXME: Do nothing with this for now. */
573 case SpvDecorationNoPerspective:
574 ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
575 break;
576 case SpvDecorationFlat:
577 ctx->fields[member].interpolation = INTERP_MODE_FLAT;
578 break;
579 case SpvDecorationCentroid:
580 ctx->fields[member].centroid = true;
581 break;
582 case SpvDecorationSample:
583 ctx->fields[member].sample = true;
584 break;
585 case SpvDecorationStream:
586 /* Vulkan only allows one GS stream */
587 vtn_assert(dec->literals[0] == 0);
588 break;
589 case SpvDecorationLocation:
590 ctx->fields[member].location = dec->literals[0];
591 break;
592 case SpvDecorationComponent:
593 break; /* FIXME: What should we do with these? */
594 case SpvDecorationBuiltIn:
595 ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
596 ctx->type->members[member]->is_builtin = true;
597 ctx->type->members[member]->builtin = dec->literals[0];
598 ctx->type->builtin_block = true;
599 break;
600 case SpvDecorationOffset:
601 ctx->type->offsets[member] = dec->literals[0];
602 break;
603 case SpvDecorationMatrixStride:
604 /* Handled as a second pass */
605 break;
606 case SpvDecorationColMajor:
607 break; /* Nothing to do here. Column-major is the default. */
608 case SpvDecorationRowMajor:
609 mutable_matrix_member(b, ctx->type, member)->row_major = true;
610 break;
611
612 case SpvDecorationPatch:
613 break;
614
615 case SpvDecorationSpecId:
616 case SpvDecorationBlock:
617 case SpvDecorationBufferBlock:
618 case SpvDecorationArrayStride:
619 case SpvDecorationGLSLShared:
620 case SpvDecorationGLSLPacked:
621 case SpvDecorationInvariant:
622 case SpvDecorationRestrict:
623 case SpvDecorationAliased:
624 case SpvDecorationConstant:
625 case SpvDecorationIndex:
626 case SpvDecorationBinding:
627 case SpvDecorationDescriptorSet:
628 case SpvDecorationLinkageAttributes:
629 case SpvDecorationNoContraction:
630 case SpvDecorationInputAttachmentIndex:
631 vtn_warn("Decoration not allowed on struct members: %s",
632 spirv_decoration_to_string(dec->decoration));
633 break;
634
635 case SpvDecorationXfbBuffer:
636 case SpvDecorationXfbStride:
637 vtn_warn("Vulkan does not have transform feedback");
638 break;
639
640 case SpvDecorationCPacked:
641 case SpvDecorationSaturatedConversion:
642 case SpvDecorationFuncParamAttr:
643 case SpvDecorationFPRoundingMode:
644 case SpvDecorationFPFastMathMode:
645 case SpvDecorationAlignment:
646 vtn_warn("Decoration only allowed for CL-style kernels: %s",
647 spirv_decoration_to_string(dec->decoration));
648 break;
649
650 default:
651 vtn_fail("Unhandled decoration");
652 }
653 }
654
655 /* Matrix strides are handled as a separate pass because we need to know
656 * whether the matrix is row-major or not first.
657 */
658 static void
659 struct_member_matrix_stride_cb(struct vtn_builder *b,
660 struct vtn_value *val, int member,
661 const struct vtn_decoration *dec,
662 void *void_ctx)
663 {
664 if (dec->decoration != SpvDecorationMatrixStride)
665 return;
666 vtn_assert(member >= 0);
667
668 struct member_decoration_ctx *ctx = void_ctx;
669
670 struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
671 if (mat_type->row_major) {
672 mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
673 mat_type->stride = mat_type->array_element->stride;
674 mat_type->array_element->stride = dec->literals[0];
675 } else {
676 vtn_assert(mat_type->array_element->stride > 0);
677 mat_type->stride = dec->literals[0];
678 }
679 }
680
681 static void
682 type_decoration_cb(struct vtn_builder *b,
683 struct vtn_value *val, int member,
684 const struct vtn_decoration *dec, void *ctx)
685 {
686 struct vtn_type *type = val->type;
687
688 if (member != -1)
689 return;
690
691 switch (dec->decoration) {
692 case SpvDecorationArrayStride:
693 vtn_assert(type->base_type == vtn_base_type_matrix ||
694 type->base_type == vtn_base_type_array ||
695 type->base_type == vtn_base_type_pointer);
696 type->stride = dec->literals[0];
697 break;
698 case SpvDecorationBlock:
699 vtn_assert(type->base_type == vtn_base_type_struct);
700 type->block = true;
701 break;
702 case SpvDecorationBufferBlock:
703 vtn_assert(type->base_type == vtn_base_type_struct);
704 type->buffer_block = true;
705 break;
706 case SpvDecorationGLSLShared:
707 case SpvDecorationGLSLPacked:
708 /* Ignore these, since we get explicit offsets anyways */
709 break;
710
711 case SpvDecorationRowMajor:
712 case SpvDecorationColMajor:
713 case SpvDecorationMatrixStride:
714 case SpvDecorationBuiltIn:
715 case SpvDecorationNoPerspective:
716 case SpvDecorationFlat:
717 case SpvDecorationPatch:
718 case SpvDecorationCentroid:
719 case SpvDecorationSample:
720 case SpvDecorationVolatile:
721 case SpvDecorationCoherent:
722 case SpvDecorationNonWritable:
723 case SpvDecorationNonReadable:
724 case SpvDecorationUniform:
725 case SpvDecorationStream:
726 case SpvDecorationLocation:
727 case SpvDecorationComponent:
728 case SpvDecorationOffset:
729 case SpvDecorationXfbBuffer:
730 case SpvDecorationXfbStride:
731 vtn_warn("Decoration only allowed for struct members: %s",
732 spirv_decoration_to_string(dec->decoration));
733 break;
734
735 case SpvDecorationRelaxedPrecision:
736 case SpvDecorationSpecId:
737 case SpvDecorationInvariant:
738 case SpvDecorationRestrict:
739 case SpvDecorationAliased:
740 case SpvDecorationConstant:
741 case SpvDecorationIndex:
742 case SpvDecorationBinding:
743 case SpvDecorationDescriptorSet:
744 case SpvDecorationLinkageAttributes:
745 case SpvDecorationNoContraction:
746 case SpvDecorationInputAttachmentIndex:
747 vtn_warn("Decoration not allowed on types: %s",
748 spirv_decoration_to_string(dec->decoration));
749 break;
750
751 case SpvDecorationCPacked:
752 case SpvDecorationSaturatedConversion:
753 case SpvDecorationFuncParamAttr:
754 case SpvDecorationFPRoundingMode:
755 case SpvDecorationFPFastMathMode:
756 case SpvDecorationAlignment:
757 vtn_warn("Decoration only allowed for CL-style kernels: %s",
758 spirv_decoration_to_string(dec->decoration));
759 break;
760
761 default:
762 vtn_fail("Unhandled decoration");
763 }
764 }
765
766 static unsigned
767 translate_image_format(struct vtn_builder *b, SpvImageFormat format)
768 {
769 switch (format) {
770 case SpvImageFormatUnknown: return 0; /* GL_NONE */
771 case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
772 case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
773 case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
774 case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
775 case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
776 case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
777 case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
778 case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
779 case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
780 case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
781 case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
782 case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
783 case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
784 case SpvImageFormatR16: return 0x822A; /* GL_R16 */
785 case SpvImageFormatR8: return 0x8229; /* GL_R8 */
786 case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
787 case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
788 case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
789 case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
790 case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
791 case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
792 case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
793 case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
794 case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
795 case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
796 case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
797 case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
798 case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
799 case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
800 case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
801 case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
802 case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
803 case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
804 case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
805 case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
806 case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
807 case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
808 case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
809 case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
810 default:
811 vtn_fail("Invalid image format");
812 }
813 }
814
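/* Recursively assigns an std430 layout to a type: scalars align to their
 * own size, a 3-component vector aligns like a 4-component one, arrays and
 * matrices get a stride equal to the aligned element size, and structs get
 * packed member offsets.  Types are copied first so the explicit layout
 * doesn't leak into other users.  This is only used for Workgroup storage
 * when the driver asks for lower_workgroup_access_to_offsets.
 */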
815 static struct vtn_type *
816 vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
817 uint32_t *size_out, uint32_t *align_out)
818 {
819 switch (type->base_type) {
820 case vtn_base_type_scalar: {
821 uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
822 *size_out = comp_size;
823 *align_out = comp_size;
824 return type;
825 }
826
827 case vtn_base_type_vector: {
828 uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
829 assert(type->length > 0 && type->length <= 4);
830 unsigned align_comps = type->length == 3 ? 4 : type->length;
831 *size_out = comp_size * type->length,
832 *align_out = comp_size * align_comps;
833 return type;
834 }
835
836 case vtn_base_type_matrix:
837 case vtn_base_type_array: {
838 /* We're going to add an array stride */
839 type = vtn_type_copy(b, type);
840 uint32_t elem_size, elem_align;
841 type->array_element = vtn_type_layout_std430(b, type->array_element,
842 &elem_size, &elem_align);
843 type->stride = vtn_align_u32(elem_size, elem_align);
844 *size_out = type->stride * type->length;
845 *align_out = elem_align;
846 return type;
847 }
848
849 case vtn_base_type_struct: {
850 /* We're going to add member offsets */
851 type = vtn_type_copy(b, type);
852 uint32_t offset = 0;
853 uint32_t align = 0;
854 for (unsigned i = 0; i < type->length; i++) {
855 uint32_t mem_size, mem_align;
856 type->members[i] = vtn_type_layout_std430(b, type->members[i],
857 &mem_size, &mem_align);
858 offset = vtn_align_u32(offset, mem_align);
859 type->offsets[i] = offset;
860 offset += mem_size;
861 align = MAX2(align, mem_align);
862 }
863 *size_out = offset;
864 *align_out = align;
865 return type;
866 }
867
868 default:
869 unreachable("Invalid SPIR-V type for std430");
870 }
871 }
872
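/* Translates the OpType* opcodes into a vtn_type and, where one exists, a
 * matching glsl_type.  Struct member decorations are applied through
 * struct_member_decoration_cb/struct_member_matrix_stride_cb and
 * type-level decorations through type_decoration_cb.
 */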
873 static void
874 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
875 const uint32_t *w, unsigned count)
876 {
877 struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
878
879 val->type = rzalloc(b, struct vtn_type);
880 val->type->val = val;
881
882 switch (opcode) {
883 case SpvOpTypeVoid:
884 val->type->base_type = vtn_base_type_void;
885 val->type->type = glsl_void_type();
886 break;
887 case SpvOpTypeBool:
888 val->type->base_type = vtn_base_type_scalar;
889 val->type->type = glsl_bool_type();
890 break;
891 case SpvOpTypeInt: {
892 int bit_size = w[2];
893 const bool signedness = w[3];
894 val->type->base_type = vtn_base_type_scalar;
895 switch (bit_size) {
896 case 64:
897 val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
898 break;
899 case 32:
900 val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
901 break;
902 case 16:
903 val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
904 break;
905 default:
906 vtn_fail("Invalid int bit size");
907 }
908 break;
909 }
910
911 case SpvOpTypeFloat: {
912 int bit_size = w[2];
913 val->type->base_type = vtn_base_type_scalar;
914 switch (bit_size) {
915 case 16:
916 val->type->type = glsl_float16_t_type();
917 break;
918 case 32:
919 val->type->type = glsl_float_type();
920 break;
921 case 64:
922 val->type->type = glsl_double_type();
923 break;
924 default:
925 vtn_fail("Invalid float bit size");
926 }
927 break;
928 }
929
930 case SpvOpTypeVector: {
931 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
932 unsigned elems = w[3];
933
934 vtn_assert(glsl_type_is_scalar(base->type));
935 val->type->base_type = vtn_base_type_vector;
936 val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
937 val->type->stride = glsl_get_bit_size(base->type) / 8;
938 val->type->array_element = base;
939 break;
940 }
941
942 case SpvOpTypeMatrix: {
943 struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
944 unsigned columns = w[3];
945
946 vtn_assert(glsl_type_is_vector(base->type));
947 val->type->base_type = vtn_base_type_matrix;
948 val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
949 glsl_get_vector_elements(base->type),
950 columns);
951 vtn_assert(!glsl_type_is_error(val->type->type));
952 val->type->length = columns;
953 val->type->array_element = base;
954 val->type->row_major = false;
955 val->type->stride = 0;
956 break;
957 }
958
959 case SpvOpTypeRuntimeArray:
960 case SpvOpTypeArray: {
961 struct vtn_type *array_element =
962 vtn_value(b, w[2], vtn_value_type_type)->type;
963
964 if (opcode == SpvOpTypeRuntimeArray) {
965 /* A length of 0 is used to denote unsized arrays */
966 val->type->length = 0;
967 } else {
968 val->type->length =
969 vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
970 }
971
972 val->type->base_type = vtn_base_type_array;
973 val->type->type = glsl_array_type(array_element->type, val->type->length);
974 val->type->array_element = array_element;
975 val->type->stride = 0;
976 break;
977 }
978
979 case SpvOpTypeStruct: {
980 unsigned num_fields = count - 2;
981 val->type->base_type = vtn_base_type_struct;
982 val->type->length = num_fields;
983 val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
984 val->type->offsets = ralloc_array(b, unsigned, num_fields);
985
986 NIR_VLA(struct glsl_struct_field, fields, count);
987 for (unsigned i = 0; i < num_fields; i++) {
988 val->type->members[i] =
989 vtn_value(b, w[i + 2], vtn_value_type_type)->type;
990 fields[i] = (struct glsl_struct_field) {
991 .type = val->type->members[i]->type,
992 .name = ralloc_asprintf(b, "field%d", i),
993 .location = -1,
994 };
995 }
996
997 struct member_decoration_ctx ctx = {
998 .num_fields = num_fields,
999 .fields = fields,
1000 .type = val->type
1001 };
1002
1003 vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
1004 vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
1005
1006 const char *name = val->name ? val->name : "struct";
1007
1008 val->type->type = glsl_struct_type(fields, num_fields, name);
1009 break;
1010 }
1011
1012 case SpvOpTypeFunction: {
1013 val->type->base_type = vtn_base_type_function;
1014 val->type->type = NULL;
1015
1016 val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;
1017
1018 const unsigned num_params = count - 3;
1019 val->type->length = num_params;
1020 val->type->params = ralloc_array(b, struct vtn_type *, num_params);
1021 for (unsigned i = 0; i < count - 3; i++) {
1022 val->type->params[i] =
1023 vtn_value(b, w[i + 3], vtn_value_type_type)->type;
1024 }
1025 break;
1026 }
1027
1028 case SpvOpTypePointer: {
1029 SpvStorageClass storage_class = w[2];
1030 struct vtn_type *deref_type =
1031 vtn_value(b, w[3], vtn_value_type_type)->type;
1032
1033 val->type->base_type = vtn_base_type_pointer;
1034 val->type->storage_class = storage_class;
1035 val->type->deref = deref_type;
1036
1037 if (storage_class == SpvStorageClassUniform ||
1038 storage_class == SpvStorageClassStorageBuffer) {
1039 /* These can actually be stored to nir_variables and used as SSA
1040 * values so they need a real glsl_type.
1041 */
1042 val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
1043 }
1044
1045 if (storage_class == SpvStorageClassWorkgroup &&
1046 b->options->lower_workgroup_access_to_offsets) {
1047 uint32_t size, align;
1048 val->type->deref = vtn_type_layout_std430(b, val->type->deref,
1049 &size, &align);
1050 val->type->length = size;
1051 val->type->align = align;
1052 /* These can actually be stored to nir_variables and used as SSA
1053 * values so they need a real glsl_type.
1054 */
1055 val->type->type = glsl_uint_type();
1056 }
1057 break;
1058 }
1059
1060 case SpvOpTypeImage: {
1061 val->type->base_type = vtn_base_type_image;
1062
1063 const struct glsl_type *sampled_type =
1064 vtn_value(b, w[2], vtn_value_type_type)->type->type;
1065
1066 vtn_assert(glsl_type_is_vector_or_scalar(sampled_type));
1067
1068 enum glsl_sampler_dim dim;
1069 switch ((SpvDim)w[3]) {
1070 case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
1071 case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
1072 case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
1073 case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
1074 case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
1075 case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
1076 case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
1077 default:
1078 vtn_fail("Invalid SPIR-V Sampler dimension");
1079 }
1080
1081 bool is_shadow = w[4];
1082 bool is_array = w[5];
1083 bool multisampled = w[6];
1084 unsigned sampled = w[7];
1085 SpvImageFormat format = w[8];
1086
1087 if (count > 9)
1088 val->type->access_qualifier = w[9];
1089 else
1090 val->type->access_qualifier = SpvAccessQualifierReadWrite;
1091
1092 if (multisampled) {
1093 if (dim == GLSL_SAMPLER_DIM_2D)
1094 dim = GLSL_SAMPLER_DIM_MS;
1095 else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
1096 dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
1097 else
1098 vtn_fail("Unsupported multisampled image type");
1099 }
1100
1101 val->type->image_format = translate_image_format(b, format);
1102
1103 if (sampled == 1) {
1104 val->type->sampled = true;
1105 val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
1106 glsl_get_base_type(sampled_type));
1107 } else if (sampled == 2) {
1108 vtn_assert(!is_shadow);
1109 val->type->sampled = false;
1110 val->type->type = glsl_image_type(dim, is_array,
1111 glsl_get_base_type(sampled_type));
1112 } else {
1113 vtn_fail("We need to know if the image will be sampled");
1114 }
1115 break;
1116 }
1117
1118 case SpvOpTypeSampledImage:
1119 val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
1120 break;
1121
1122 case SpvOpTypeSampler:
1123 /* The actual sampler type here doesn't really matter. It gets
1124 * thrown away the moment you combine it with an image. What really
1125 * matters is that it's a sampler type as opposed to an integer type
1126 * so the backend knows what to do.
1127 */
1128 val->type->base_type = vtn_base_type_sampler;
1129 val->type->type = glsl_bare_sampler_type();
1130 break;
1131
1132 case SpvOpTypeOpaque:
1133 case SpvOpTypeEvent:
1134 case SpvOpTypeDeviceEvent:
1135 case SpvOpTypeReserveId:
1136 case SpvOpTypeQueue:
1137 case SpvOpTypePipe:
1138 default:
1139 vtn_fail("Unhandled opcode");
1140 }
1141
1142 vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
1143 }
1144
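/* Builds the zero value used for OpConstantNull.  Scalars and vectors rely
 * on the rzalloc zero-fill; arrays share a single zeroed element and
 * structs recurse per field.
 */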
1145 static nir_constant *
1146 vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
1147 {
1148 nir_constant *c = rzalloc(b, nir_constant);
1149
1150 /* For pointers and other typeless things, we have to return something but
1151 * it doesn't matter what.
1152 */
1153 if (!type)
1154 return c;
1155
1156 switch (glsl_get_base_type(type)) {
1157 case GLSL_TYPE_INT:
1158 case GLSL_TYPE_UINT:
1159 case GLSL_TYPE_INT16:
1160 case GLSL_TYPE_UINT16:
1161 case GLSL_TYPE_INT64:
1162 case GLSL_TYPE_UINT64:
1163 case GLSL_TYPE_BOOL:
1164 case GLSL_TYPE_FLOAT:
1165 case GLSL_TYPE_FLOAT16:
1166 case GLSL_TYPE_DOUBLE:
1167 /* Nothing to do here. It's already initialized to zero */
1168 break;
1169
1170 case GLSL_TYPE_ARRAY:
1171 vtn_assert(glsl_get_length(type) > 0);
1172 c->num_elements = glsl_get_length(type);
1173 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1174
1175 c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
1176 for (unsigned i = 1; i < c->num_elements; i++)
1177 c->elements[i] = c->elements[0];
1178 break;
1179
1180 case GLSL_TYPE_STRUCT:
1181 c->num_elements = glsl_get_length(type);
1182 c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1183
1184 for (unsigned i = 0; i < c->num_elements; i++) {
1185 c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
1186 }
1187 break;
1188
1189 default:
1190 vtn_fail("Invalid type for null constant");
1191 }
1192
1193 return c;
1194 }
1195
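/* Decoration callback that resolves a SpecId decoration: if the driver
 * supplied a specialization with a matching id, its data overwrites the
 * default value stored in the spec_constant_value pointed to by data.
 */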
1196 static void
1197 spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
1198 int member, const struct vtn_decoration *dec,
1199 void *data)
1200 {
1201 vtn_assert(member == -1);
1202 if (dec->decoration != SpvDecorationSpecId)
1203 return;
1204
1205 struct spec_constant_value *const_value = data;
1206
1207 for (unsigned i = 0; i < b->num_specializations; i++) {
1208 if (b->specializations[i].id == dec->literals[0]) {
1209 if (const_value->is_double)
1210 const_value->data64 = b->specializations[i].data64;
1211 else
1212 const_value->data32 = b->specializations[i].data32;
1213 return;
1214 }
1215 }
1216 }
1217
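/* Returns const_value unless the value carries a SpecId decoration that
 * matches one of the driver-supplied specializations, in which case the
 * 32-bit override wins.  get_specialization64() below is the 64-bit
 * flavor.  As an illustration, a driver entry along the lines of
 * { .id = 3, .data32 = 7 } replaces the default of whichever constant is
 * decorated "SpecId 3".
 */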
1218 static uint32_t
1219 get_specialization(struct vtn_builder *b, struct vtn_value *val,
1220 uint32_t const_value)
1221 {
1222 struct spec_constant_value data;
1223 data.is_double = false;
1224 data.data32 = const_value;
1225 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
1226 return data.data32;
1227 }
1228
1229 static uint64_t
1230 get_specialization64(struct vtn_builder *b, struct vtn_value *val,
1231 uint64_t const_value)
1232 {
1233 struct spec_constant_value data;
1234 data.is_double = true;
1235 data.data64 = const_value;
1236 vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
1237 return data.data64;
1238 }
1239
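/* Runs after every constant is parsed: if the constant is decorated as the
 * WorkgroupSize built-in, its three components become the compute shader's
 * local workgroup size, so specialized sizes are picked up automatically.
 */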
1240 static void
1241 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1242 struct vtn_value *val,
1243 int member,
1244 const struct vtn_decoration *dec,
1245 void *data)
1246 {
1247 vtn_assert(member == -1);
1248 if (dec->decoration != SpvDecorationBuiltIn ||
1249 dec->literals[0] != SpvBuiltInWorkgroupSize)
1250 return;
1251
1252 vtn_assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1253
1254 b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
1255 b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
1256 b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
1257 }
1258
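/* Handles every OpConstant* and OpSpecConstant* opcode.  Specialization
 * constants fetch their override through get_specialization() or
 * get_specialization64() based on the result type's bit size, which is
 * what allows 16- and 64-bit specialization constants in addition to the
 * usual 32-bit ones.  Roughly, a module containing
 *
 *    OpDecorate %x SpecId 7
 *    %x = OpSpecConstant %ushort 5
 *
 * yields a 16-bit constant of 5 unless the driver overrides id 7.
 * OpSpecConstantOp is folded immediately via nir_eval_const_opcode().
 */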
1259 static void
1260 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1261 const uint32_t *w, unsigned count)
1262 {
1263 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1264 val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
1265 val->constant = rzalloc(b, nir_constant);
1266 switch (opcode) {
1267 case SpvOpConstantTrue:
1268 vtn_assert(val->const_type == glsl_bool_type());
1269 val->constant->values[0].u32[0] = NIR_TRUE;
1270 break;
1271 case SpvOpConstantFalse:
1272 vtn_assert(val->const_type == glsl_bool_type());
1273 val->constant->values[0].u32[0] = NIR_FALSE;
1274 break;
1275
1276 case SpvOpSpecConstantTrue:
1277 case SpvOpSpecConstantFalse: {
1278 vtn_assert(val->const_type == glsl_bool_type());
1279 uint32_t int_val =
1280 get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
1281 val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
1282 break;
1283 }
1284
1285 case SpvOpConstant: {
1286 vtn_assert(glsl_type_is_scalar(val->const_type));
1287 int bit_size = glsl_get_bit_size(val->const_type);
1288 switch (bit_size) {
1289 case 64:
1290 val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
1291 break;
1292 case 32:
1293 val->constant->values->u32[0] = w[3];
1294 break;
1295 case 16:
1296 val->constant->values->u16[0] = w[3];
1297 break;
1298 default:
1299 vtn_fail("Unsupported SpvOpConstant bit size");
1300 }
1301 break;
1302 }
1303 case SpvOpSpecConstant: {
1304 vtn_assert(glsl_type_is_scalar(val->const_type));
1305 val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
1306 int bit_size = glsl_get_bit_size(val->const_type);
1307 switch (bit_size) {
1308 case 64:
1309 val->constant->values[0].u64[0] =
1310 get_specialization64(b, val, vtn_u64_literal(&w[3]));
1311 break;
1312 case 32:
1313 val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
1314 break;
1315 case 16:
1316 val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
1317 break;
1318 default:
1319 vtn_fail("Unsupported SpvOpSpecConstant bit size");
1320 }
1321 break;
1322 }
1323 case SpvOpSpecConstantComposite:
1324 case SpvOpConstantComposite: {
1325 unsigned elem_count = count - 3;
1326 nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1327 for (unsigned i = 0; i < elem_count; i++)
1328 elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
1329
1330 switch (glsl_get_base_type(val->const_type)) {
1331 case GLSL_TYPE_UINT:
1332 case GLSL_TYPE_INT:
1333 case GLSL_TYPE_UINT16:
1334 case GLSL_TYPE_INT16:
1335 case GLSL_TYPE_UINT64:
1336 case GLSL_TYPE_INT64:
1337 case GLSL_TYPE_FLOAT:
1338 case GLSL_TYPE_FLOAT16:
1339 case GLSL_TYPE_BOOL:
1340 case GLSL_TYPE_DOUBLE: {
1341 int bit_size = glsl_get_bit_size(val->const_type);
1342 if (glsl_type_is_matrix(val->const_type)) {
1343 vtn_assert(glsl_get_matrix_columns(val->const_type) == elem_count);
1344 for (unsigned i = 0; i < elem_count; i++)
1345 val->constant->values[i] = elems[i]->values[0];
1346 } else {
1347 vtn_assert(glsl_type_is_vector(val->const_type));
1348 vtn_assert(glsl_get_vector_elements(val->const_type) == elem_count);
1349 for (unsigned i = 0; i < elem_count; i++) {
1350 switch (bit_size) {
1351 case 64:
1352 val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
1353 break;
1354 case 32:
1355 val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
1356 break;
1357 case 16:
1358 val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
1359 break;
1360 default:
1361 vtn_fail("Invalid SpvOpConstantComposite bit size");
1362 }
1363 }
1364 }
1365 ralloc_free(elems);
1366 break;
1367 }
1368 case GLSL_TYPE_STRUCT:
1369 case GLSL_TYPE_ARRAY:
1370 ralloc_steal(val->constant, elems);
1371 val->constant->num_elements = elem_count;
1372 val->constant->elements = elems;
1373 break;
1374
1375 default:
1376 vtn_fail("Unsupported type for constants");
1377 }
1378 break;
1379 }
1380
1381 case SpvOpSpecConstantOp: {
1382 SpvOp opcode = get_specialization(b, val, w[3]);
1383 switch (opcode) {
1384 case SpvOpVectorShuffle: {
1385 struct vtn_value *v0 = &b->values[w[4]];
1386 struct vtn_value *v1 = &b->values[w[5]];
1387
1388 vtn_assert(v0->value_type == vtn_value_type_constant ||
1389 v0->value_type == vtn_value_type_undef);
1390 vtn_assert(v1->value_type == vtn_value_type_constant ||
1391 v1->value_type == vtn_value_type_undef);
1392
1393 unsigned len0 = v0->value_type == vtn_value_type_constant ?
1394 glsl_get_vector_elements(v0->const_type) :
1395 glsl_get_vector_elements(v0->type->type);
1396 unsigned len1 = v1->value_type == vtn_value_type_constant ?
1397 glsl_get_vector_elements(v1->const_type) :
1398 glsl_get_vector_elements(v1->type->type);
1399
1400 vtn_assert(len0 + len1 < 16);
1401
1402 unsigned bit_size = glsl_get_bit_size(val->const_type);
1403 unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
1404 glsl_get_bit_size(v0->const_type) :
1405 glsl_get_bit_size(v0->type->type);
1406 unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
1407 glsl_get_bit_size(v1->const_type) :
1408 glsl_get_bit_size(v1->type->type);
1409
1410 vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1411 (void)bit_size0; (void)bit_size1;
1412
1413 if (bit_size == 64) {
1414 uint64_t u64[8];
1415 if (v0->value_type == vtn_value_type_constant) {
1416 for (unsigned i = 0; i < len0; i++)
1417 u64[i] = v0->constant->values[0].u64[i];
1418 }
1419 if (v1->value_type == vtn_value_type_constant) {
1420 for (unsigned i = 0; i < len1; i++)
1421 u64[len0 + i] = v1->constant->values[0].u64[i];
1422 }
1423
1424 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1425 uint32_t comp = w[i + 6];
1426 /* If component is not used, set the value to a known constant
1427 * to detect if it is wrongly used.
1428 */
1429 if (comp == (uint32_t)-1)
1430 val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
1431 else
1432 val->constant->values[0].u64[j] = u64[comp];
1433 }
1434 } else {
1435 /* This is for both 32-bit and 16-bit values */
1436 uint32_t u32[8];
1437 if (v0->value_type == vtn_value_type_constant) {
1438 for (unsigned i = 0; i < len0; i++)
1439 u32[i] = v0->constant->values[0].u32[i];
1440 }
1441 if (v1->value_type == vtn_value_type_constant) {
1442 for (unsigned i = 0; i < len1; i++)
1443 u32[len0 + i] = v1->constant->values[0].u32[i];
1444 }
1445
1446 for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1447 uint32_t comp = w[i + 6];
1448 /* If component is not used, set the value to a known constant
1449 * to detect if it is wrongly used.
1450 */
1451 if (comp == (uint32_t)-1)
1452 val->constant->values[0].u32[j] = 0xdeadbeef;
1453 else
1454 val->constant->values[0].u32[j] = u32[comp];
1455 }
1456 }
1457 break;
1458 }
1459
1460 case SpvOpCompositeExtract:
1461 case SpvOpCompositeInsert: {
1462 struct vtn_value *comp;
1463 unsigned deref_start;
1464 struct nir_constant **c;
1465 if (opcode == SpvOpCompositeExtract) {
1466 comp = vtn_value(b, w[4], vtn_value_type_constant);
1467 deref_start = 5;
1468 c = &comp->constant;
1469 } else {
1470 comp = vtn_value(b, w[5], vtn_value_type_constant);
1471 deref_start = 6;
1472 val->constant = nir_constant_clone(comp->constant,
1473 (nir_variable *)b);
1474 c = &val->constant;
1475 }
1476
1477 int elem = -1;
1478 int col = 0;
1479 const struct glsl_type *type = comp->const_type;
1480 for (unsigned i = deref_start; i < count; i++) {
1481 switch (glsl_get_base_type(type)) {
1482 case GLSL_TYPE_UINT:
1483 case GLSL_TYPE_INT:
1484 case GLSL_TYPE_UINT16:
1485 case GLSL_TYPE_INT16:
1486 case GLSL_TYPE_UINT64:
1487 case GLSL_TYPE_INT64:
1488 case GLSL_TYPE_FLOAT:
1489 case GLSL_TYPE_FLOAT16:
1490 case GLSL_TYPE_DOUBLE:
1491 case GLSL_TYPE_BOOL:
1492 /* If we hit this granularity, we're picking off an element */
1493 if (glsl_type_is_matrix(type)) {
1494 vtn_assert(col == 0 && elem == -1);
1495 col = w[i];
1496 elem = 0;
1497 type = glsl_get_column_type(type);
1498 } else {
1499 vtn_assert(elem <= 0 && glsl_type_is_vector(type));
1500 elem = w[i];
1501 type = glsl_scalar_type(glsl_get_base_type(type));
1502 }
1503 continue;
1504
1505 case GLSL_TYPE_ARRAY:
1506 c = &(*c)->elements[w[i]];
1507 type = glsl_get_array_element(type);
1508 continue;
1509
1510 case GLSL_TYPE_STRUCT:
1511 c = &(*c)->elements[w[i]];
1512 type = glsl_get_struct_field(type, w[i]);
1513 continue;
1514
1515 default:
1516 vtn_fail("Invalid constant type");
1517 }
1518 }
1519
1520 if (opcode == SpvOpCompositeExtract) {
1521 if (elem == -1) {
1522 val->constant = *c;
1523 } else {
1524 unsigned num_components = glsl_get_vector_elements(type);
1525 unsigned bit_size = glsl_get_bit_size(type);
1526 for (unsigned i = 0; i < num_components; i++)
1527 switch(bit_size) {
1528 case 64:
1529 val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
1530 break;
1531 case 32:
1532 val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
1533 break;
1534 case 16:
1535 val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
1536 break;
1537 default:
1538 vtn_fail("Invalid SpvOpCompositeExtract bit size");
1539 }
1540 }
1541 } else {
1542 struct vtn_value *insert =
1543 vtn_value(b, w[4], vtn_value_type_constant);
1544 vtn_assert(insert->const_type == type);
1545 if (elem == -1) {
1546 *c = insert->constant;
1547 } else {
1548 unsigned num_components = glsl_get_vector_elements(type);
1549 unsigned bit_size = glsl_get_bit_size(type);
1550 for (unsigned i = 0; i < num_components; i++)
1551 switch (bit_size) {
1552 case 64:
1553 (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
1554 break;
1555 case 32:
1556 (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
1557 break;
1558 case 16:
1559 (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
1560 break;
1561 default:
1562 vtn_fail("Invalid SpvOpCompositeInsert bit size");
1563 }
1564 }
1565 }
1566 break;
1567 }
1568
1569 default: {
1570 bool swap;
1571 nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
1572 nir_alu_type src_alu_type = dst_alu_type;
1573 nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
1574 src_alu_type,
1575 dst_alu_type);
1576
1577 unsigned num_components = glsl_get_vector_elements(val->const_type);
1578 unsigned bit_size =
1579 glsl_get_bit_size(val->const_type);
1580
1581 nir_const_value src[4];
1582 vtn_assert(count <= 7);
1583 for (unsigned i = 0; i < count - 4; i++) {
1584 nir_constant *c =
1585 vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
1586
1587 unsigned j = swap ? 1 - i : i;
1588 src[j] = c->values[0];
1589 }
1590
1591 val->constant->values[0] =
1592 nir_eval_const_opcode(op, num_components, bit_size, src);
1593 break;
1594 } /* default */
1595 }
1596 break;
1597 }
1598
1599 case SpvOpConstantNull:
1600 val->constant = vtn_null_constant(b, val->const_type);
1601 break;
1602
1603 case SpvOpConstantSampler:
1604 vtn_fail("OpConstantSampler requires Kernel Capability");
1605 break;
1606
1607 default:
1608 vtn_fail("Unhandled opcode");
1609 }
1610
1611 /* Now that we have the value, update the workgroup size if needed */
1612 vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
1613 }
1614
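/* Lowers OpFunctionCall to a nir_call_instr.  Pointer arguments without a
 * real glsl_type are passed as variable derefs; every other argument is
 * spilled to a temporary local, and a second temporary catches the return
 * value, which is then loaded and pushed as the call's SSA result.
 */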
1615 static void
1616 vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
1617 const uint32_t *w, unsigned count)
1618 {
1619 struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
1620 struct vtn_function *vtn_callee =
1621 vtn_value(b, w[3], vtn_value_type_function)->func;
1622 struct nir_function *callee = vtn_callee->impl->function;
1623
1624 vtn_callee->referenced = true;
1625
1626 nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
1627 for (unsigned i = 0; i < call->num_params; i++) {
1628 unsigned arg_id = w[4 + i];
1629 struct vtn_value *arg = vtn_untyped_value(b, arg_id);
1630 if (arg->value_type == vtn_value_type_pointer &&
1631 arg->pointer->ptr_type->type == NULL) {
1632 nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
1633 call->params[i] = nir_deref_var_clone(d, call);
1634 } else {
1635 struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
1636
1637 /* Make a temporary to store the argument in */
1638 nir_variable *tmp =
1639 nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
1640 call->params[i] = nir_deref_var_create(call, tmp);
1641
1642 vtn_local_store(b, arg_ssa, call->params[i]);
1643 }
1644 }
1645
1646 nir_variable *out_tmp = NULL;
1647 vtn_assert(res_type->type == callee->return_type);
1648 if (!glsl_type_is_void(callee->return_type)) {
1649 out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
1650 "out_tmp");
1651 call->return_deref = nir_deref_var_create(call, out_tmp);
1652 }
1653
1654 nir_builder_instr_insert(&b->nb, &call->instr);
1655
1656 if (glsl_type_is_void(callee->return_type)) {
1657 vtn_push_value(b, w[2], vtn_value_type_undef);
1658 } else {
1659 vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
1660 }
1661 }
1662
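/* Allocates an empty vtn_ssa_value tree mirroring the given type: vectors
 * and scalars get a single def slot, aggregates recurse into columns,
 * array elements, or struct fields.  The defs themselves are filled in by
 * the caller.
 */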
1663 struct vtn_ssa_value *
1664 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
1665 {
1666 struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
1667 val->type = type;
1668
1669 if (!glsl_type_is_vector_or_scalar(type)) {
1670 unsigned elems = glsl_get_length(type);
1671 val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
1672 for (unsigned i = 0; i < elems; i++) {
1673 const struct glsl_type *child_type;
1674
1675 switch (glsl_get_base_type(type)) {
1676 case GLSL_TYPE_INT:
1677 case GLSL_TYPE_UINT:
1678 case GLSL_TYPE_INT16:
1679 case GLSL_TYPE_UINT16:
1680 case GLSL_TYPE_INT64:
1681 case GLSL_TYPE_UINT64:
1682 case GLSL_TYPE_BOOL:
1683 case GLSL_TYPE_FLOAT:
1684 case GLSL_TYPE_FLOAT16:
1685 case GLSL_TYPE_DOUBLE:
1686 child_type = glsl_get_column_type(type);
1687 break;
1688 case GLSL_TYPE_ARRAY:
1689 child_type = glsl_get_array_element(type);
1690 break;
1691 case GLSL_TYPE_STRUCT:
1692 child_type = glsl_get_struct_field(type, i);
1693 break;
1694 default:
1695 vtn_fail("unknown base type");
1696 }
1697
1698 val->elems[i] = vtn_create_ssa_value(b, child_type);
1699 }
1700 }
1701
1702 return val;
1703 }
1704
1705 static nir_tex_src
1706 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
1707 {
1708 nir_tex_src src;
1709 src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
1710 src.src_type = type;
1711 return src;
1712 }
1713
1714 static void
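/* Handles OpSampledImage, OpImage, and the image sampling/query opcodes by
 * building up a nir_tex_instr: the base texop is chosen from the opcode,
 * then coordinate, projector, comparator, gather component, and the
 * optional image-operand sources are collected in order from the operand
 * words.
 */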
1715 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
1716 const uint32_t *w, unsigned count)
1717 {
1718 if (opcode == SpvOpSampledImage) {
1719 struct vtn_value *val =
1720 vtn_push_value(b, w[2], vtn_value_type_sampled_image);
1721 val->sampled_image = ralloc(b, struct vtn_sampled_image);
1722 val->sampled_image->type =
1723 vtn_value(b, w[1], vtn_value_type_type)->type;
1724 val->sampled_image->image =
1725 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
1726 val->sampled_image->sampler =
1727 vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
1728 return;
1729 } else if (opcode == SpvOpImage) {
1730 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
1731 struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
1732 if (src_val->value_type == vtn_value_type_sampled_image) {
1733 val->pointer = src_val->sampled_image->image;
1734 } else {
1735 vtn_assert(src_val->value_type == vtn_value_type_pointer);
1736 val->pointer = src_val->pointer;
1737 }
1738 return;
1739 }
1740
1741 struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
1742 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
1743
1744 struct vtn_sampled_image sampled;
1745 struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
1746 if (sampled_val->value_type == vtn_value_type_sampled_image) {
1747 sampled = *sampled_val->sampled_image;
1748 } else {
1749 vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
1750 sampled.type = sampled_val->pointer->type;
1751 sampled.image = NULL;
1752 sampled.sampler = sampled_val->pointer;
1753 }
1754
1755 const struct glsl_type *image_type = sampled.type->type;
1756 const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
1757 const bool is_array = glsl_sampler_type_is_array(image_type);
1758 const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
1759
1760 /* Figure out the base texture operation */
1761 nir_texop texop;
1762 switch (opcode) {
1763 case SpvOpImageSampleImplicitLod:
1764 case SpvOpImageSampleDrefImplicitLod:
1765 case SpvOpImageSampleProjImplicitLod:
1766 case SpvOpImageSampleProjDrefImplicitLod:
1767 texop = nir_texop_tex;
1768 break;
1769
1770 case SpvOpImageSampleExplicitLod:
1771 case SpvOpImageSampleDrefExplicitLod:
1772 case SpvOpImageSampleProjExplicitLod:
1773 case SpvOpImageSampleProjDrefExplicitLod:
1774 texop = nir_texop_txl;
1775 break;
1776
1777 case SpvOpImageFetch:
1778 if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
1779 texop = nir_texop_txf_ms;
1780 } else {
1781 texop = nir_texop_txf;
1782 }
1783 break;
1784
1785 case SpvOpImageGather:
1786 case SpvOpImageDrefGather:
1787 texop = nir_texop_tg4;
1788 break;
1789
1790 case SpvOpImageQuerySizeLod:
1791 case SpvOpImageQuerySize:
1792 texop = nir_texop_txs;
1793 break;
1794
1795 case SpvOpImageQueryLod:
1796 texop = nir_texop_lod;
1797 break;
1798
1799 case SpvOpImageQueryLevels:
1800 texop = nir_texop_query_levels;
1801 break;
1802
1803 case SpvOpImageQuerySamples:
1804 texop = nir_texop_texture_samples;
1805 break;
1806
1807 default:
1808 vtn_fail("Unhandled opcode");
1809 }
1810
1811 nir_tex_src srcs[8]; /* 8 should be enough */
1812 nir_tex_src *p = srcs;
1813
1814 unsigned idx = 4;
1815
1816 struct nir_ssa_def *coord;
1817 unsigned coord_components;
1818 switch (opcode) {
1819 case SpvOpImageSampleImplicitLod:
1820 case SpvOpImageSampleExplicitLod:
1821 case SpvOpImageSampleDrefImplicitLod:
1822 case SpvOpImageSampleDrefExplicitLod:
1823 case SpvOpImageSampleProjImplicitLod:
1824 case SpvOpImageSampleProjExplicitLod:
1825 case SpvOpImageSampleProjDrefImplicitLod:
1826 case SpvOpImageSampleProjDrefExplicitLod:
1827 case SpvOpImageFetch:
1828 case SpvOpImageGather:
1829 case SpvOpImageDrefGather:
1830 case SpvOpImageQueryLod: {
1831 /* All these types have the coordinate as their first real argument */
1832 switch (sampler_dim) {
1833 case GLSL_SAMPLER_DIM_1D:
1834 case GLSL_SAMPLER_DIM_BUF:
1835 coord_components = 1;
1836 break;
1837 case GLSL_SAMPLER_DIM_2D:
1838 case GLSL_SAMPLER_DIM_RECT:
1839 case GLSL_SAMPLER_DIM_MS:
1840 coord_components = 2;
1841 break;
1842 case GLSL_SAMPLER_DIM_3D:
1843 case GLSL_SAMPLER_DIM_CUBE:
1844 coord_components = 3;
1845 break;
1846 default:
1847 vtn_fail("Invalid sampler type");
1848 }
1849
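           /* OpImageQueryLod takes a non-arrayed coordinate, so the array
            * layer is only appended for the sampling and fetching opcodes.
            */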
1850 if (is_array && texop != nir_texop_lod)
1851 coord_components++;
1852
1853 coord = vtn_ssa_value(b, w[idx++])->def;
1854 p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
1855 (1 << coord_components) - 1));
1856 p->src_type = nir_tex_src_coord;
1857 p++;
1858 break;
1859 }
1860
1861 default:
1862 coord = NULL;
1863 coord_components = 0;
1864 break;
1865 }
1866
1867 switch (opcode) {
1868 case SpvOpImageSampleProjImplicitLod:
1869 case SpvOpImageSampleProjExplicitLod:
1870 case SpvOpImageSampleProjDrefImplicitLod:
1871 case SpvOpImageSampleProjDrefExplicitLod:
1872 /* These have the projector as the last coordinate component */
1873 p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
1874 p->src_type = nir_tex_src_projector;
1875 p++;
1876 break;
1877
1878 default:
1879 break;
1880 }
1881
1882 unsigned gather_component = 0;
1883 switch (opcode) {
1884 case SpvOpImageSampleDrefImplicitLod:
1885 case SpvOpImageSampleDrefExplicitLod:
1886 case SpvOpImageSampleProjDrefImplicitLod:
1887 case SpvOpImageSampleProjDrefExplicitLod:
1888 case SpvOpImageDrefGather:
1889       /* These all have an explicit depth comparison value (Dref) as their next source */
1890 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
1891 break;
1892
1893 case SpvOpImageGather:
1894 /* This has a component as its next source */
1895 gather_component =
1896 vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
1897 break;
1898
1899 default:
1900 break;
1901 }
1902
1903 /* For OpImageQuerySizeLod, we always have an LOD */
1904 if (opcode == SpvOpImageQuerySizeLod)
1905 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1906
1907 /* Now we need to handle some number of optional arguments */
1908 const struct vtn_ssa_value *gather_offsets = NULL;
1909 if (idx < count) {
1910 uint32_t operands = w[idx++];
1911
1912 if (operands & SpvImageOperandsBiasMask) {
1913 vtn_assert(texop == nir_texop_tex);
1914 texop = nir_texop_txb;
1915 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
1916 }
1917
1918 if (operands & SpvImageOperandsLodMask) {
1919 vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
1920 texop == nir_texop_txs);
1921 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
1922 }
1923
1924 if (operands & SpvImageOperandsGradMask) {
1925 vtn_assert(texop == nir_texop_txl);
1926 texop = nir_texop_txd;
1927 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
1928 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
1929 }
1930
1931 if (operands & SpvImageOperandsOffsetMask ||
1932 operands & SpvImageOperandsConstOffsetMask)
1933 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
1934
1935 if (operands & SpvImageOperandsConstOffsetsMask) {
1936 gather_offsets = vtn_ssa_value(b, w[idx++]);
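              /* Reserve a source slot here; it is filled in later with one of
               * the four constant offsets when the instruction is duplicated
               * below.
               */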
1937 (*p++) = (nir_tex_src){};
1938 }
1939
1940 if (operands & SpvImageOperandsSampleMask) {
1941 vtn_assert(texop == nir_texop_txf_ms);
1942 texop = nir_texop_txf_ms;
1943 (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
1944 }
1945 }
1946 /* We should have now consumed exactly all of the arguments */
1947 vtn_assert(idx == count);
1948
1949 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
1950 instr->op = texop;
1951
1952 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
1953
1954 instr->coord_components = coord_components;
1955 instr->sampler_dim = sampler_dim;
1956 instr->is_array = is_array;
1957 instr->is_shadow = is_shadow;
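        /* NIR's is_new_style_shadow means the comparison result is returned
         * as a single scalar rather than replicated to a vec4, which matches
         * SPIR-V Dref lookups whose result type has one component.
         */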
1958 instr->is_new_style_shadow =
1959 is_shadow && glsl_get_components(ret_type->type) == 1;
1960 instr->component = gather_component;
1961
1962 switch (glsl_get_sampler_result_type(image_type)) {
1963 case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
1964 case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
1965 case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
1966 case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
1967 default:
1968 vtn_fail("Invalid base type for sampler result");
1969 }
1970
1971 nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
1972 nir_deref_var *texture;
1973 if (sampled.image) {
1974 nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
1975 texture = image;
1976 } else {
1977 texture = sampler;
1978 }
1979
1980 instr->texture = nir_deref_var_clone(texture, instr);
1981
1982 switch (instr->op) {
1983 case nir_texop_tex:
1984 case nir_texop_txb:
1985 case nir_texop_txl:
1986 case nir_texop_txd:
1987 case nir_texop_tg4:
1988 /* These operations require a sampler */
1989 instr->sampler = nir_deref_var_clone(sampler, instr);
1990 break;
1991 case nir_texop_txf:
1992 case nir_texop_txf_ms:
1993 case nir_texop_txs:
1994 case nir_texop_lod:
1995 case nir_texop_query_levels:
1996 case nir_texop_texture_samples:
1997 case nir_texop_samples_identical:
1998 /* These don't */
1999 instr->sampler = NULL;
2000 break;
2001 case nir_texop_txf_ms_mcs:
2002 vtn_fail("unexpected nir_texop_txf_ms_mcs");
2003 }
2004
2005 nir_ssa_dest_init(&instr->instr, &instr->dest,
2006 nir_tex_instr_dest_size(instr), 32, NULL);
2007
2008 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2009 nir_tex_instr_dest_size(instr));
2010
2011 nir_ssa_def *def;
2012 nir_instr *instruction;
2013 if (gather_offsets) {
2014 vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
2015 vtn_assert(glsl_get_length(gather_offsets->type) == 4);
2016 nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
2017
2018 /* Copy the current instruction 4x */
2019 for (uint32_t i = 1; i < 4; i++) {
2020 instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
2021 instrs[i]->op = instr->op;
2022 instrs[i]->coord_components = instr->coord_components;
2023 instrs[i]->sampler_dim = instr->sampler_dim;
2024 instrs[i]->is_array = instr->is_array;
2025 instrs[i]->is_shadow = instr->is_shadow;
2026 instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
2027 instrs[i]->component = instr->component;
2028 instrs[i]->dest_type = instr->dest_type;
2029 instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
2030 instrs[i]->sampler = NULL;
2031
2032 memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
2033
2034 nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
2035 nir_tex_instr_dest_size(instr), 32, NULL);
2036 }
2037
2038 /* Fill in the last argument with the offset from the passed in offsets
2039 * and insert the instruction into the stream.
2040 */
2041 for (uint32_t i = 0; i < 4; i++) {
2042 nir_tex_src src;
2043 src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
2044 src.src_type = nir_tex_src_offset;
2045 instrs[i]->src[instrs[i]->num_srcs - 1] = src;
2046 nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
2047 }
2048
2049 /* Combine the results of the 4 instructions by taking their .w
2050 * components
2051 */
2052 nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
2053 nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
2054 vec4->dest.write_mask = 0xf;
2055 for (uint32_t i = 0; i < 4; i++) {
2056 vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
2057 vec4->src[i].swizzle[0] = 3;
2058 }
2059 def = &vec4->dest.dest.ssa;
2060 instruction = &vec4->instr;
2061 } else {
2062 def = &instr->dest.ssa;
2063 instruction = &instr->instr;
2064 }
2065
2066 val->ssa = vtn_create_ssa_value(b, ret_type->type);
2067 val->ssa->def = def;
2068
2069 nir_builder_instr_insert(&b->nb, instruction);
2070 }
2071
2072 static void
2073 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2074 const uint32_t *w, nir_src *src)
2075 {
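        /* SPIR-V atomics with no direct NIR counterpart are expressed as an
         * add: IIncrement adds 1, IDecrement adds -1 and ISub adds the
         * negated operand.  For CompareExchange, the Comparator (w[8]) is
         * placed in the first data source and the Value (w[7]) in the second.
         */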
2076 switch (opcode) {
2077 case SpvOpAtomicIIncrement:
2078 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
2079 break;
2080
2081 case SpvOpAtomicIDecrement:
2082 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
2083 break;
2084
2085 case SpvOpAtomicISub:
2086 src[0] =
2087 nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
2088 break;
2089
2090 case SpvOpAtomicCompareExchange:
2091 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
2092 src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
2093 break;
2094
2095 case SpvOpAtomicExchange:
2096 case SpvOpAtomicIAdd:
2097 case SpvOpAtomicSMin:
2098 case SpvOpAtomicUMin:
2099 case SpvOpAtomicSMax:
2100 case SpvOpAtomicUMax:
2101 case SpvOpAtomicAnd:
2102 case SpvOpAtomicOr:
2103 case SpvOpAtomicXor:
2104 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
2105 break;
2106
2107 default:
2108 vtn_fail("Invalid SPIR-V atomic");
2109 }
2110 }
2111
2112 static nir_ssa_def *
2113 get_image_coord(struct vtn_builder *b, uint32_t value)
2114 {
2115 struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
2116
2117 /* The image_load_store intrinsics assume a 4-dim coordinate */
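        /* e.g. a 2-component (x, y) coordinate is extended to (x, y, y, y)
         * by replicating its last component.
         */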
2118 unsigned dim = glsl_get_vector_elements(coord->type);
2119 unsigned swizzle[4];
2120 for (unsigned i = 0; i < 4; i++)
2121 swizzle[i] = MIN2(i, dim - 1);
2122
2123 return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
2124 }
2125
2126 static void
2127 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2128 const uint32_t *w, unsigned count)
2129 {
2130 /* Just get this one out of the way */
2131 if (opcode == SpvOpImageTexelPointer) {
2132 struct vtn_value *val =
2133 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2134 val->image = ralloc(b, struct vtn_image_pointer);
2135
2136 val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2137 val->image->coord = get_image_coord(b, w[4]);
2138 val->image->sample = vtn_ssa_value(b, w[5])->def;
2139 return;
2140 }
2141
2142 struct vtn_image_pointer image;
2143
2144 switch (opcode) {
2145 case SpvOpAtomicExchange:
2146 case SpvOpAtomicCompareExchange:
2147 case SpvOpAtomicCompareExchangeWeak:
2148 case SpvOpAtomicIIncrement:
2149 case SpvOpAtomicIDecrement:
2150 case SpvOpAtomicIAdd:
2151 case SpvOpAtomicISub:
2152 case SpvOpAtomicLoad:
2153 case SpvOpAtomicSMin:
2154 case SpvOpAtomicUMin:
2155 case SpvOpAtomicSMax:
2156 case SpvOpAtomicUMax:
2157 case SpvOpAtomicAnd:
2158 case SpvOpAtomicOr:
2159 case SpvOpAtomicXor:
2160 image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
2161 break;
2162
2163 case SpvOpAtomicStore:
2164 image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
2165 break;
2166
2167 case SpvOpImageQuerySize:
2168 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2169 image.coord = NULL;
2170 image.sample = NULL;
2171 break;
2172
2173 case SpvOpImageRead:
2174 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2175 image.coord = get_image_coord(b, w[4]);
2176
2177 if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
2178 vtn_assert(w[5] == SpvImageOperandsSampleMask);
2179 image.sample = vtn_ssa_value(b, w[6])->def;
2180 } else {
2181 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2182 }
2183 break;
2184
2185 case SpvOpImageWrite:
2186 image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2187 image.coord = get_image_coord(b, w[2]);
2188
2189 /* texel = w[3] */
2190
2191 if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
2192 vtn_assert(w[4] == SpvImageOperandsSampleMask);
2193 image.sample = vtn_ssa_value(b, w[5])->def;
2194 } else {
2195 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2196 }
2197 break;
2198
2199 default:
2200 vtn_fail("Invalid image opcode");
2201 }
2202
2203 nir_intrinsic_op op;
2204 switch (opcode) {
2205 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
2206 OP(ImageQuerySize, size)
2207 OP(ImageRead, load)
2208 OP(ImageWrite, store)
2209 OP(AtomicLoad, load)
2210 OP(AtomicStore, store)
2211 OP(AtomicExchange, atomic_exchange)
2212 OP(AtomicCompareExchange, atomic_comp_swap)
2213 OP(AtomicIIncrement, atomic_add)
2214 OP(AtomicIDecrement, atomic_add)
2215 OP(AtomicIAdd, atomic_add)
2216 OP(AtomicISub, atomic_add)
2217 OP(AtomicSMin, atomic_min)
2218 OP(AtomicUMin, atomic_min)
2219 OP(AtomicSMax, atomic_max)
2220 OP(AtomicUMax, atomic_max)
2221 OP(AtomicAnd, atomic_and)
2222 OP(AtomicOr, atomic_or)
2223 OP(AtomicXor, atomic_xor)
2224 #undef OP
2225 default:
2226 vtn_fail("Invalid image opcode");
2227 }
2228
2229 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
2230
2231 nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
2232 intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
2233
2234 /* ImageQuerySize doesn't take any extra parameters */
2235 if (opcode != SpvOpImageQuerySize) {
2236 /* The image coordinate is always 4 components but we may not have that
2237 * many. Swizzle to compensate.
2238 */
2239 unsigned swiz[4];
2240 for (unsigned i = 0; i < 4; i++)
2241 swiz[i] = i < image.coord->num_components ? i : 0;
2242 intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
2243 swiz, 4, false));
2244 intrin->src[1] = nir_src_for_ssa(image.sample);
2245 }
2246
2247 switch (opcode) {
2248 case SpvOpAtomicLoad:
2249 case SpvOpImageQuerySize:
2250 case SpvOpImageRead:
2251 break;
2252 case SpvOpAtomicStore:
2253 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2254 break;
2255 case SpvOpImageWrite:
2256 intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
2257 break;
2258
2259 case SpvOpAtomicCompareExchange:
2260 case SpvOpAtomicIIncrement:
2261 case SpvOpAtomicIDecrement:
2262 case SpvOpAtomicExchange:
2263 case SpvOpAtomicIAdd:
2264 case SpvOpAtomicISub:
2265 case SpvOpAtomicSMin:
2266 case SpvOpAtomicUMin:
2267 case SpvOpAtomicSMax:
2268 case SpvOpAtomicUMax:
2269 case SpvOpAtomicAnd:
2270 case SpvOpAtomicOr:
2271 case SpvOpAtomicXor:
2272 fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
2273 break;
2274
2275 default:
2276 vtn_fail("Invalid image opcode");
2277 }
2278
2279 if (opcode != SpvOpImageWrite) {
2280 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2281 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2282
2283 unsigned dest_components =
2284 nir_intrinsic_infos[intrin->intrinsic].dest_components;
2285 if (intrin->intrinsic == nir_intrinsic_image_size) {
2286 dest_components = intrin->num_components =
2287 glsl_get_vector_elements(type->type);
2288 }
2289
2290 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
2291 dest_components, 32, NULL);
2292
2293 nir_builder_instr_insert(&b->nb, &intrin->instr);
2294
2295 val->ssa = vtn_create_ssa_value(b, type->type);
2296 val->ssa->def = &intrin->dest.ssa;
2297 } else {
2298 nir_builder_instr_insert(&b->nb, &intrin->instr);
2299 }
2300 }
2301
2302 static nir_intrinsic_op
2303 get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2304 {
2305 switch (opcode) {
2306 case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
2307 case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
2308 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
2309 OP(AtomicExchange, atomic_exchange)
2310 OP(AtomicCompareExchange, atomic_comp_swap)
2311 OP(AtomicIIncrement, atomic_add)
2312 OP(AtomicIDecrement, atomic_add)
2313 OP(AtomicIAdd, atomic_add)
2314 OP(AtomicISub, atomic_add)
2315 OP(AtomicSMin, atomic_imin)
2316 OP(AtomicUMin, atomic_umin)
2317 OP(AtomicSMax, atomic_imax)
2318 OP(AtomicUMax, atomic_umax)
2319 OP(AtomicAnd, atomic_and)
2320 OP(AtomicOr, atomic_or)
2321 OP(AtomicXor, atomic_xor)
2322 #undef OP
2323 default:
2324 vtn_fail("Invalid SSBO atomic");
2325 }
2326 }
2327
2328 static nir_intrinsic_op
2329 get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2330 {
2331 switch (opcode) {
2332 case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
2333 case SpvOpAtomicStore: return nir_intrinsic_store_shared;
2334 #define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
2335 OP(AtomicExchange, atomic_exchange)
2336 OP(AtomicCompareExchange, atomic_comp_swap)
2337 OP(AtomicIIncrement, atomic_add)
2338 OP(AtomicIDecrement, atomic_add)
2339 OP(AtomicIAdd, atomic_add)
2340 OP(AtomicISub, atomic_add)
2341 OP(AtomicSMin, atomic_imin)
2342 OP(AtomicUMin, atomic_umin)
2343 OP(AtomicSMax, atomic_imax)
2344 OP(AtomicUMax, atomic_umax)
2345 OP(AtomicAnd, atomic_and)
2346 OP(AtomicOr, atomic_or)
2347 OP(AtomicXor, atomic_xor)
2348 #undef OP
2349 default:
2350 vtn_fail("Invalid shared atomic");
2351 }
2352 }
2353
2354 static nir_intrinsic_op
2355 get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2356 {
2357 switch (opcode) {
2358 case SpvOpAtomicLoad: return nir_intrinsic_load_var;
2359 case SpvOpAtomicStore: return nir_intrinsic_store_var;
2360 #define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
2361 OP(AtomicExchange, atomic_exchange)
2362 OP(AtomicCompareExchange, atomic_comp_swap)
2363 OP(AtomicIIncrement, atomic_add)
2364 OP(AtomicIDecrement, atomic_add)
2365 OP(AtomicIAdd, atomic_add)
2366 OP(AtomicISub, atomic_add)
2367 OP(AtomicSMin, atomic_imin)
2368 OP(AtomicUMin, atomic_umin)
2369 OP(AtomicSMax, atomic_imax)
2370 OP(AtomicUMax, atomic_umax)
2371 OP(AtomicAnd, atomic_and)
2372 OP(AtomicOr, atomic_or)
2373 OP(AtomicXor, atomic_xor)
2374 #undef OP
2375 default:
2376       vtn_fail("Invalid variable atomic");
2377 }
2378 }
2379
2380 static void
2381 vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
2382 const uint32_t *w, unsigned count)
2383 {
2384 struct vtn_pointer *ptr;
2385 nir_intrinsic_instr *atomic;
2386
2387 switch (opcode) {
2388 case SpvOpAtomicLoad:
2389 case SpvOpAtomicExchange:
2390 case SpvOpAtomicCompareExchange:
2391 case SpvOpAtomicCompareExchangeWeak:
2392 case SpvOpAtomicIIncrement:
2393 case SpvOpAtomicIDecrement:
2394 case SpvOpAtomicIAdd:
2395 case SpvOpAtomicISub:
2396 case SpvOpAtomicSMin:
2397 case SpvOpAtomicUMin:
2398 case SpvOpAtomicSMax:
2399 case SpvOpAtomicUMax:
2400 case SpvOpAtomicAnd:
2401 case SpvOpAtomicOr:
2402 case SpvOpAtomicXor:
2403 ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2404 break;
2405
2406 case SpvOpAtomicStore:
2407 ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2408 break;
2409
2410 default:
2411 vtn_fail("Invalid SPIR-V atomic");
2412 }
2413
2414 /*
2415 SpvScope scope = w[4];
2416 SpvMemorySemanticsMask semantics = w[5];
2417 */
2418
2419 if (ptr->mode == vtn_variable_mode_workgroup &&
2420 !b->options->lower_workgroup_access_to_offsets) {
2421 nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
2422 const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
2423 nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
2424 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2425 atomic->variables[0] = nir_deref_var_clone(deref, atomic);
2426
2427 switch (opcode) {
2428 case SpvOpAtomicLoad:
2429 atomic->num_components = glsl_get_vector_elements(deref_type);
2430 break;
2431
2432 case SpvOpAtomicStore:
2433 atomic->num_components = glsl_get_vector_elements(deref_type);
2434 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2435 atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2436 break;
2437
2438 case SpvOpAtomicExchange:
2439 case SpvOpAtomicCompareExchange:
2440 case SpvOpAtomicCompareExchangeWeak:
2441 case SpvOpAtomicIIncrement:
2442 case SpvOpAtomicIDecrement:
2443 case SpvOpAtomicIAdd:
2444 case SpvOpAtomicISub:
2445 case SpvOpAtomicSMin:
2446 case SpvOpAtomicUMin:
2447 case SpvOpAtomicSMax:
2448 case SpvOpAtomicUMax:
2449 case SpvOpAtomicAnd:
2450 case SpvOpAtomicOr:
2451 case SpvOpAtomicXor:
2452 fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
2453 break;
2454
2455 default:
2456 vtn_fail("Invalid SPIR-V atomic");
2457
2458 }
2459 } else {
2460 nir_ssa_def *offset, *index;
2461 offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
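           /* For SSBO pointers the offset is paired with a block index, which
            * is passed as an extra leading source below; lowered workgroup
            * pointers only carry an offset.
            */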
2462
2463 nir_intrinsic_op op;
2464 if (ptr->mode == vtn_variable_mode_ssbo) {
2465 op = get_ssbo_nir_atomic_op(b, opcode);
2466 } else {
2467 vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
2468 b->options->lower_workgroup_access_to_offsets);
2469 op = get_shared_nir_atomic_op(b, opcode);
2470 }
2471
2472 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2473
2474 int src = 0;
2475 switch (opcode) {
2476 case SpvOpAtomicLoad:
2477 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2478 if (ptr->mode == vtn_variable_mode_ssbo)
2479 atomic->src[src++] = nir_src_for_ssa(index);
2480 atomic->src[src++] = nir_src_for_ssa(offset);
2481 break;
2482
2483 case SpvOpAtomicStore:
2484 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2485 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2486 atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2487 if (ptr->mode == vtn_variable_mode_ssbo)
2488 atomic->src[src++] = nir_src_for_ssa(index);
2489 atomic->src[src++] = nir_src_for_ssa(offset);
2490 break;
2491
2492 case SpvOpAtomicExchange:
2493 case SpvOpAtomicCompareExchange:
2494 case SpvOpAtomicCompareExchangeWeak:
2495 case SpvOpAtomicIIncrement:
2496 case SpvOpAtomicIDecrement:
2497 case SpvOpAtomicIAdd:
2498 case SpvOpAtomicISub:
2499 case SpvOpAtomicSMin:
2500 case SpvOpAtomicUMin:
2501 case SpvOpAtomicSMax:
2502 case SpvOpAtomicUMax:
2503 case SpvOpAtomicAnd:
2504 case SpvOpAtomicOr:
2505 case SpvOpAtomicXor:
2506 if (ptr->mode == vtn_variable_mode_ssbo)
2507 atomic->src[src++] = nir_src_for_ssa(index);
2508 atomic->src[src++] = nir_src_for_ssa(offset);
2509 fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
2510 break;
2511
2512 default:
2513 vtn_fail("Invalid SPIR-V atomic");
2514 }
2515 }
2516
2517 if (opcode != SpvOpAtomicStore) {
2518 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2519
2520 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
2521 glsl_get_vector_elements(type->type),
2522 glsl_get_bit_size(type->type), NULL);
2523
2524 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2525 val->ssa = rzalloc(b, struct vtn_ssa_value);
2526 val->ssa->def = &atomic->dest.ssa;
2527 val->ssa->type = type->type;
2528 }
2529
2530 nir_builder_instr_insert(&b->nb, &atomic->instr);
2531 }
2532
2533 static nir_alu_instr *
2534 create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
2535 {
2536 nir_op op;
2537 switch (num_components) {
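        /* A single component has no vecN opcode, so fall back to a plain move. */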
2538 case 1: op = nir_op_fmov; break;
2539 case 2: op = nir_op_vec2; break;
2540 case 3: op = nir_op_vec3; break;
2541 case 4: op = nir_op_vec4; break;
2542 default: vtn_fail("bad vector size");
2543 }
2544
2545 nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
2546 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
2547 bit_size, NULL);
2548 vec->dest.write_mask = (1 << num_components) - 1;
2549
2550 return vec;
2551 }
2552
2553 struct vtn_ssa_value *
2554 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
2555 {
2556 if (src->transposed)
2557 return src->transposed;
2558
2559 struct vtn_ssa_value *dest =
2560 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
2561
2562 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
2563 nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
2564 glsl_get_bit_size(src->type));
2565 if (glsl_type_is_vector_or_scalar(src->type)) {
2566 vec->src[0].src = nir_src_for_ssa(src->def);
2567 vec->src[0].swizzle[0] = i;
2568 } else {
2569 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
2570 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
2571 vec->src[j].swizzle[0] = i;
2572 }
2573 }
2574 nir_builder_instr_insert(&b->nb, &vec->instr);
2575 dest->elems[i]->def = &vec->dest.dest.ssa;
2576 }
2577
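        /* Record the source as the transpose of the result so that
         * transposing the result again simply returns the original value.
         */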
2578 dest->transposed = src;
2579
2580 return dest;
2581 }
2582
2583 nir_ssa_def *
2584 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
2585 {
2586 unsigned swiz[4] = { index };
2587 return nir_swizzle(&b->nb, src, swiz, 1, true);
2588 }
2589
2590 nir_ssa_def *
2591 vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
2592 unsigned index)
2593 {
2594 nir_alu_instr *vec = create_vec(b, src->num_components,
2595 src->bit_size);
2596
2597 for (unsigned i = 0; i < src->num_components; i++) {
2598 if (i == index) {
2599 vec->src[i].src = nir_src_for_ssa(insert);
2600 } else {
2601 vec->src[i].src = nir_src_for_ssa(src);
2602 vec->src[i].swizzle[0] = i;
2603 }
2604 }
2605
2606 nir_builder_instr_insert(&b->nb, &vec->instr);
2607
2608 return &vec->dest.dest.ssa;
2609 }
2610
2611 nir_ssa_def *
2612 vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2613 nir_ssa_def *index)
2614 {
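        /* Dynamic indexing is lowered to a chain of bcsels that compares the
         * index against each component in turn.
         */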
2615 nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
2616 for (unsigned i = 1; i < src->num_components; i++)
2617 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2618 vtn_vector_extract(b, src, i), dest);
2619
2620 return dest;
2621 }
2622
2623 nir_ssa_def *
2624 vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2625 nir_ssa_def *insert, nir_ssa_def *index)
2626 {
2627 nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
2628 for (unsigned i = 1; i < src->num_components; i++)
2629 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2630 vtn_vector_insert(b, src, insert, i), dest);
2631
2632 return dest;
2633 }
2634
2635 static nir_ssa_def *
2636 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
2637 nir_ssa_def *src0, nir_ssa_def *src1,
2638 const uint32_t *indices)
2639 {
2640 nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
2641
2642 for (unsigned i = 0; i < num_components; i++) {
2643 uint32_t index = indices[i];
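           /* An index of 0xffffffff marks an undefined component in
            * OpVectorShuffle, so emit an undef for it.
            */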
2644 if (index == 0xffffffff) {
2645 vec->src[i].src =
2646 nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
2647 } else if (index < src0->num_components) {
2648 vec->src[i].src = nir_src_for_ssa(src0);
2649 vec->src[i].swizzle[0] = index;
2650 } else {
2651 vec->src[i].src = nir_src_for_ssa(src1);
2652 vec->src[i].swizzle[0] = index - src0->num_components;
2653 }
2654 }
2655
2656 nir_builder_instr_insert(&b->nb, &vec->instr);
2657
2658 return &vec->dest.dest.ssa;
2659 }
2660
2661 /*
2662  * Concatenates a number of vectors/scalars together to produce a vector
2663 */
2664 static nir_ssa_def *
2665 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
2666 unsigned num_srcs, nir_ssa_def **srcs)
2667 {
2668 nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
2669
2670 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2671 *
2672 * "When constructing a vector, there must be at least two Constituent
2673 * operands."
2674 */
2675 vtn_assert(num_srcs >= 2);
2676
2677 unsigned dest_idx = 0;
2678 for (unsigned i = 0; i < num_srcs; i++) {
2679 nir_ssa_def *src = srcs[i];
2680 vtn_assert(dest_idx + src->num_components <= num_components);
2681 for (unsigned j = 0; j < src->num_components; j++) {
2682 vec->src[dest_idx].src = nir_src_for_ssa(src);
2683 vec->src[dest_idx].swizzle[0] = j;
2684 dest_idx++;
2685 }
2686 }
2687
2688 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2689 *
2690 * "When constructing a vector, the total number of components in all
2691 * the operands must equal the number of components in Result Type."
2692 */
2693 vtn_assert(dest_idx == num_components);
2694
2695 nir_builder_instr_insert(&b->nb, &vec->instr);
2696
2697 return &vec->dest.dest.ssa;
2698 }
2699
2700 static struct vtn_ssa_value *
2701 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
2702 {
2703 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
2704 dest->type = src->type;
2705
2706 if (glsl_type_is_vector_or_scalar(src->type)) {
2707 dest->def = src->def;
2708 } else {
2709 unsigned elems = glsl_get_length(src->type);
2710
2711 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
2712 for (unsigned i = 0; i < elems; i++)
2713 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
2714 }
2715
2716 return dest;
2717 }
2718
2719 static struct vtn_ssa_value *
2720 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
2721 struct vtn_ssa_value *insert, const uint32_t *indices,
2722 unsigned num_indices)
2723 {
2724 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
2725
2726 struct vtn_ssa_value *cur = dest;
2727 unsigned i;
2728 for (i = 0; i < num_indices - 1; i++) {
2729 cur = cur->elems[indices[i]];
2730 }
2731
2732 if (glsl_type_is_vector_or_scalar(cur->type)) {
2733 /* According to the SPIR-V spec, OpCompositeInsert may work down to
2734        * the component granularity. In that case, the last index is the
2735        * component index at which the scalar is inserted into the vector.
2736 */
2737
2738 cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
2739 } else {
2740 cur->elems[indices[i]] = insert;
2741 }
2742
2743 return dest;
2744 }
2745
2746 static struct vtn_ssa_value *
2747 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
2748 const uint32_t *indices, unsigned num_indices)
2749 {
2750 struct vtn_ssa_value *cur = src;
2751 for (unsigned i = 0; i < num_indices; i++) {
2752 if (glsl_type_is_vector_or_scalar(cur->type)) {
2753 vtn_assert(i == num_indices - 1);
2754 /* According to the SPIR-V spec, OpCompositeExtract may work down to
2755        * the component granularity. The last index will be the index of the
2756        * component to extract from the vector.
2757 */
2758
2759 struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
2760 ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
2761 ret->def = vtn_vector_extract(b, cur->def, indices[i]);
2762 return ret;
2763 } else {
2764 cur = cur->elems[indices[i]];
2765 }
2766 }
2767
2768 return cur;
2769 }
2770
2771 static void
2772 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
2773 const uint32_t *w, unsigned count)
2774 {
2775 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2776 const struct glsl_type *type =
2777 vtn_value(b, w[1], vtn_value_type_type)->type->type;
2778 val->ssa = vtn_create_ssa_value(b, type);
2779
2780 switch (opcode) {
2781 case SpvOpVectorExtractDynamic:
2782 val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
2783 vtn_ssa_value(b, w[4])->def);
2784 break;
2785
2786 case SpvOpVectorInsertDynamic:
2787 val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
2788 vtn_ssa_value(b, w[4])->def,
2789 vtn_ssa_value(b, w[5])->def);
2790 break;
2791
2792 case SpvOpVectorShuffle:
2793 val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
2794 vtn_ssa_value(b, w[3])->def,
2795 vtn_ssa_value(b, w[4])->def,
2796 w + 5);
2797 break;
2798
2799 case SpvOpCompositeConstruct: {
2800 unsigned elems = count - 3;
2801 if (glsl_type_is_vector_or_scalar(type)) {
2802 nir_ssa_def *srcs[4];
2803 for (unsigned i = 0; i < elems; i++)
2804 srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
2805 val->ssa->def =
2806 vtn_vector_construct(b, glsl_get_vector_elements(type),
2807 elems, srcs);
2808 } else {
2809 val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2810 for (unsigned i = 0; i < elems; i++)
2811 val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
2812 }
2813 break;
2814 }
2815 case SpvOpCompositeExtract:
2816 val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
2817 w + 4, count - 4);
2818 break;
2819
2820 case SpvOpCompositeInsert:
2821 val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
2822 vtn_ssa_value(b, w[3]),
2823 w + 5, count - 5);
2824 break;
2825
2826 case SpvOpCopyObject:
2827 val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
2828 break;
2829
2830 default:
2831 vtn_fail("unknown composite operation");
2832 }
2833 }
2834
2835 static void
2836 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
2837 const uint32_t *w, unsigned count)
2838 {
2839 nir_intrinsic_op intrinsic_op;
2840 switch (opcode) {
2841 case SpvOpEmitVertex:
2842 case SpvOpEmitStreamVertex:
2843 intrinsic_op = nir_intrinsic_emit_vertex;
2844 break;
2845 case SpvOpEndPrimitive:
2846 case SpvOpEndStreamPrimitive:
2847 intrinsic_op = nir_intrinsic_end_primitive;
2848 break;
2849 case SpvOpMemoryBarrier:
2850 intrinsic_op = nir_intrinsic_memory_barrier;
2851 break;
2852 case SpvOpControlBarrier:
2853 intrinsic_op = nir_intrinsic_barrier;
2854 break;
2855 default:
2856 vtn_fail("unknown barrier instruction");
2857 }
2858
2859 nir_intrinsic_instr *intrin =
2860 nir_intrinsic_instr_create(b->shader, intrinsic_op);
2861
2862 if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
2863 nir_intrinsic_set_stream_id(intrin, w[1]);
2864
2865 nir_builder_instr_insert(&b->nb, &intrin->instr);
2866 }
2867
2868 static unsigned
2869 gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
2870 SpvExecutionMode mode)
2871 {
2872 switch (mode) {
2873 case SpvExecutionModeInputPoints:
2874 case SpvExecutionModeOutputPoints:
2875 return 0; /* GL_POINTS */
2876 case SpvExecutionModeInputLines:
2877 return 1; /* GL_LINES */
2878 case SpvExecutionModeInputLinesAdjacency:
2879 return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
2880 case SpvExecutionModeTriangles:
2881 return 4; /* GL_TRIANGLES */
2882 case SpvExecutionModeInputTrianglesAdjacency:
2883 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
2884 case SpvExecutionModeQuads:
2885 return 7; /* GL_QUADS */
2886 case SpvExecutionModeIsolines:
2887 return 0x8E7A; /* GL_ISOLINES */
2888 case SpvExecutionModeOutputLineStrip:
2889 return 3; /* GL_LINE_STRIP */
2890 case SpvExecutionModeOutputTriangleStrip:
2891 return 5; /* GL_TRIANGLE_STRIP */
2892 default:
2893 vtn_fail("Invalid primitive type");
2894 }
2895 }
2896
2897 static unsigned
2898 vertices_in_from_spv_execution_mode(struct vtn_builder *b,
2899 SpvExecutionMode mode)
2900 {
2901 switch (mode) {
2902 case SpvExecutionModeInputPoints:
2903 return 1;
2904 case SpvExecutionModeInputLines:
2905 return 2;
2906 case SpvExecutionModeInputLinesAdjacency:
2907 return 4;
2908 case SpvExecutionModeTriangles:
2909 return 3;
2910 case SpvExecutionModeInputTrianglesAdjacency:
2911 return 6;
2912 default:
2913 vtn_fail("Invalid GS input mode");
2914 }
2915 }
2916
2917 static gl_shader_stage
2918 stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
2919 {
2920 switch (model) {
2921 case SpvExecutionModelVertex:
2922 return MESA_SHADER_VERTEX;
2923 case SpvExecutionModelTessellationControl:
2924 return MESA_SHADER_TESS_CTRL;
2925 case SpvExecutionModelTessellationEvaluation:
2926 return MESA_SHADER_TESS_EVAL;
2927 case SpvExecutionModelGeometry:
2928 return MESA_SHADER_GEOMETRY;
2929 case SpvExecutionModelFragment:
2930 return MESA_SHADER_FRAGMENT;
2931 case SpvExecutionModelGLCompute:
2932 return MESA_SHADER_COMPUTE;
2933 default:
2934 vtn_fail("Unsupported execution model");
2935 }
2936 }
2937
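     /* Warn (rather than fail) about capabilities the consumer has not
      * enabled so that translation can still proceed on a best-effort basis.
      */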
2938 #define spv_check_supported(name, cap) do { \
2939 if (!(b->options && b->options->caps.name)) \
2940 vtn_warn("Unsupported SPIR-V capability: %s", \
2941 spirv_capability_to_string(cap)); \
2942 } while(0)
2943
2944 static bool
2945 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
2946 const uint32_t *w, unsigned count)
2947 {
2948 switch (opcode) {
2949 case SpvOpSource: {
2950 const char *lang;
2951 switch (w[1]) {
2952 default:
2953 case SpvSourceLanguageUnknown: lang = "unknown"; break;
2954 case SpvSourceLanguageESSL: lang = "ESSL"; break;
2955 case SpvSourceLanguageGLSL: lang = "GLSL"; break;
2956 case SpvSourceLanguageOpenCL_C: lang = "OpenCL C"; break;
2957 case SpvSourceLanguageOpenCL_CPP: lang = "OpenCL C++"; break;
2958 case SpvSourceLanguageHLSL: lang = "HLSL"; break;
2959 }
2960
2961 uint32_t version = w[2];
2962
2963 const char *file =
2964 (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";
2965
2966 vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
2967 break;
2968 }
2969
2970 case SpvOpSourceExtension:
2971 case SpvOpSourceContinued:
2972 case SpvOpExtension:
2973 /* Unhandled, but these are for debug so that's ok. */
2974 break;
2975
2976 case SpvOpCapability: {
2977 SpvCapability cap = w[1];
2978 switch (cap) {
2979 case SpvCapabilityMatrix:
2980 case SpvCapabilityShader:
2981 case SpvCapabilityGeometry:
2982 case SpvCapabilityGeometryPointSize:
2983 case SpvCapabilityUniformBufferArrayDynamicIndexing:
2984 case SpvCapabilitySampledImageArrayDynamicIndexing:
2985 case SpvCapabilityStorageBufferArrayDynamicIndexing:
2986 case SpvCapabilityStorageImageArrayDynamicIndexing:
2987 case SpvCapabilityImageRect:
2988 case SpvCapabilitySampledRect:
2989 case SpvCapabilitySampled1D:
2990 case SpvCapabilityImage1D:
2991 case SpvCapabilitySampledCubeArray:
2992 case SpvCapabilityImageCubeArray:
2993 case SpvCapabilitySampledBuffer:
2994 case SpvCapabilityImageBuffer:
2995 case SpvCapabilityImageQuery:
2996 case SpvCapabilityDerivativeControl:
2997 case SpvCapabilityInterpolationFunction:
2998 case SpvCapabilityMultiViewport:
2999 case SpvCapabilitySampleRateShading:
3000 case SpvCapabilityClipDistance:
3001 case SpvCapabilityCullDistance:
3002 case SpvCapabilityInputAttachment:
3003 case SpvCapabilityImageGatherExtended:
3004 case SpvCapabilityStorageImageExtendedFormats:
3005 break;
3006
3007 case SpvCapabilityGeometryStreams:
3008 case SpvCapabilityLinkage:
3009 case SpvCapabilityVector16:
3010 case SpvCapabilityFloat16Buffer:
3011 case SpvCapabilityFloat16:
3012 case SpvCapabilityInt64Atomics:
3013 case SpvCapabilityAtomicStorage:
3014 case SpvCapabilityInt16:
3015 case SpvCapabilityStorageImageMultisample:
3016 case SpvCapabilityInt8:
3017 case SpvCapabilitySparseResidency:
3018 case SpvCapabilityMinLod:
3019 case SpvCapabilityTransformFeedback:
3020 vtn_warn("Unsupported SPIR-V capability: %s",
3021 spirv_capability_to_string(cap));
3022 break;
3023
3024 case SpvCapabilityFloat64:
3025 spv_check_supported(float64, cap);
3026 break;
3027 case SpvCapabilityInt64:
3028 spv_check_supported(int64, cap);
3029 break;
3030
3031 case SpvCapabilityAddresses:
3032 case SpvCapabilityKernel:
3033 case SpvCapabilityImageBasic:
3034 case SpvCapabilityImageReadWrite:
3035 case SpvCapabilityImageMipmap:
3036 case SpvCapabilityPipes:
3037 case SpvCapabilityGroups:
3038 case SpvCapabilityDeviceEnqueue:
3039 case SpvCapabilityLiteralSampler:
3040 case SpvCapabilityGenericPointer:
3041 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
3042 spirv_capability_to_string(cap));
3043 break;
3044
3045 case SpvCapabilityImageMSArray:
3046 spv_check_supported(image_ms_array, cap);
3047 break;
3048
3049 case SpvCapabilityTessellation:
3050 case SpvCapabilityTessellationPointSize:
3051 spv_check_supported(tessellation, cap);
3052 break;
3053
3054 case SpvCapabilityDrawParameters:
3055 spv_check_supported(draw_parameters, cap);
3056 break;
3057
3058 case SpvCapabilityStorageImageReadWithoutFormat:
3059 spv_check_supported(image_read_without_format, cap);
3060 break;
3061
3062 case SpvCapabilityStorageImageWriteWithoutFormat:
3063 spv_check_supported(image_write_without_format, cap);
3064 break;
3065
3066 case SpvCapabilityMultiView:
3067 spv_check_supported(multiview, cap);
3068 break;
3069
3070 case SpvCapabilityVariablePointersStorageBuffer:
3071 case SpvCapabilityVariablePointers:
3072 spv_check_supported(variable_pointers, cap);
3073 break;
3074
3075 case SpvCapabilityStorageUniformBufferBlock16:
3076 case SpvCapabilityStorageUniform16:
3077 case SpvCapabilityStoragePushConstant16:
3078 case SpvCapabilityStorageInputOutput16:
3079 spv_check_supported(storage_16bit, cap);
3080 break;
3081
3082 default:
3083 vtn_fail("Unhandled capability");
3084 }
3085 break;
3086 }
3087
3088 case SpvOpExtInstImport:
3089 vtn_handle_extension(b, opcode, w, count);
3090 break;
3091
3092 case SpvOpMemoryModel:
3093 vtn_assert(w[1] == SpvAddressingModelLogical);
3094 vtn_assert(w[2] == SpvMemoryModelSimple ||
3095 w[2] == SpvMemoryModelGLSL450);
3096 break;
3097
3098 case SpvOpEntryPoint: {
3099 struct vtn_value *entry_point = &b->values[w[2]];
3100 /* Let this be a name label regardless */
3101 unsigned name_words;
3102 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
3103
3104 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
3105 stage_for_execution_model(b, w[1]) != b->entry_point_stage)
3106 break;
3107
3108 vtn_assert(b->entry_point == NULL);
3109 b->entry_point = entry_point;
3110 break;
3111 }
3112
3113 case SpvOpString:
3114 vtn_push_value(b, w[1], vtn_value_type_string)->str =
3115 vtn_string_literal(b, &w[2], count - 2, NULL);
3116 break;
3117
3118 case SpvOpName:
3119 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
3120 break;
3121
3122 case SpvOpMemberName:
3123 /* TODO */
3124 break;
3125
3126 case SpvOpExecutionMode:
3127 case SpvOpDecorationGroup:
3128 case SpvOpDecorate:
3129 case SpvOpMemberDecorate:
3130 case SpvOpGroupDecorate:
3131 case SpvOpGroupMemberDecorate:
3132 vtn_handle_decoration(b, opcode, w, count);
3133 break;
3134
3135 default:
3136 return false; /* End of preamble */
3137 }
3138
3139 return true;
3140 }
3141
3142 static void
3143 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
3144 const struct vtn_decoration *mode, void *data)
3145 {
3146 vtn_assert(b->entry_point == entry_point);
3147
3148 switch(mode->exec_mode) {
3149 case SpvExecutionModeOriginUpperLeft:
3150 case SpvExecutionModeOriginLowerLeft:
3151 b->origin_upper_left =
3152 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
3153 break;
3154
3155 case SpvExecutionModeEarlyFragmentTests:
3156 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3157 b->shader->info.fs.early_fragment_tests = true;
3158 break;
3159
3160 case SpvExecutionModeInvocations:
3161 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3162 b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
3163 break;
3164
3165 case SpvExecutionModeDepthReplacing:
3166 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3167 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
3168 break;
3169 case SpvExecutionModeDepthGreater:
3170 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3171 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
3172 break;
3173 case SpvExecutionModeDepthLess:
3174 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3175 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
3176 break;
3177 case SpvExecutionModeDepthUnchanged:
3178 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3179 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3180 break;
3181
3182 case SpvExecutionModeLocalSize:
3183 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
3184 b->shader->info.cs.local_size[0] = mode->literals[0];
3185 b->shader->info.cs.local_size[1] = mode->literals[1];
3186 b->shader->info.cs.local_size[2] = mode->literals[2];
3187 break;
3188 case SpvExecutionModeLocalSizeHint:
3189 break; /* Nothing to do with this */
3190
3191 case SpvExecutionModeOutputVertices:
3192 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3193 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
3194 b->shader->info.tess.tcs_vertices_out = mode->literals[0];
3195 } else {
3196 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3197 b->shader->info.gs.vertices_out = mode->literals[0];
3198 }
3199 break;
3200
3201 case SpvExecutionModeInputPoints:
3202 case SpvExecutionModeInputLines:
3203 case SpvExecutionModeInputLinesAdjacency:
3204 case SpvExecutionModeTriangles:
3205 case SpvExecutionModeInputTrianglesAdjacency:
3206 case SpvExecutionModeQuads:
3207 case SpvExecutionModeIsolines:
3208 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3209 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
3210 b->shader->info.tess.primitive_mode =
3211 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
3212 } else {
3213 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3214 b->shader->info.gs.vertices_in =
3215 vertices_in_from_spv_execution_mode(b, mode->exec_mode);
3216 }
3217 break;
3218
3219 case SpvExecutionModeOutputPoints:
3220 case SpvExecutionModeOutputLineStrip:
3221 case SpvExecutionModeOutputTriangleStrip:
3222 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3223 b->shader->info.gs.output_primitive =
3224 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
3225 break;
3226
3227 case SpvExecutionModeSpacingEqual:
3228 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3229 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3230 b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
3231 break;
3232 case SpvExecutionModeSpacingFractionalEven:
3233 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3234 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3235 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
3236 break;
3237 case SpvExecutionModeSpacingFractionalOdd:
3238 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3239 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3240 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
3241 break;
3242 case SpvExecutionModeVertexOrderCw:
3243 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3244 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3245 b->shader->info.tess.ccw = false;
3246 break;
3247 case SpvExecutionModeVertexOrderCcw:
3248 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3249 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3250 b->shader->info.tess.ccw = true;
3251 break;
3252 case SpvExecutionModePointMode:
3253 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3254 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3255 b->shader->info.tess.point_mode = true;
3256 break;
3257
3258 case SpvExecutionModePixelCenterInteger:
3259 b->pixel_center_integer = true;
3260 break;
3261
3262 case SpvExecutionModeXfb:
3263 vtn_fail("Unhandled execution mode");
3264 break;
3265
3266 case SpvExecutionModeVecTypeHint:
3267 case SpvExecutionModeContractionOff:
3268 break; /* OpenCL */
3269
3270 default:
3271 vtn_fail("Unhandled execution mode");
3272 }
3273 }
3274
3275 static bool
3276 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
3277 const uint32_t *w, unsigned count)
3278 {
3279 switch (opcode) {
3280 case SpvOpSource:
3281 case SpvOpSourceContinued:
3282 case SpvOpSourceExtension:
3283 case SpvOpExtension:
3284 case SpvOpCapability:
3285 case SpvOpExtInstImport:
3286 case SpvOpMemoryModel:
3287 case SpvOpEntryPoint:
3288 case SpvOpExecutionMode:
3289 case SpvOpString:
3290 case SpvOpName:
3291 case SpvOpMemberName:
3292 case SpvOpDecorationGroup:
3293 case SpvOpDecorate:
3294 case SpvOpMemberDecorate:
3295 case SpvOpGroupDecorate:
3296 case SpvOpGroupMemberDecorate:
3297       vtn_fail("Invalid opcode in types and variables section");
3298 break;
3299
3300 case SpvOpTypeVoid:
3301 case SpvOpTypeBool:
3302 case SpvOpTypeInt:
3303 case SpvOpTypeFloat:
3304 case SpvOpTypeVector:
3305 case SpvOpTypeMatrix:
3306 case SpvOpTypeImage:
3307 case SpvOpTypeSampler:
3308 case SpvOpTypeSampledImage:
3309 case SpvOpTypeArray:
3310 case SpvOpTypeRuntimeArray:
3311 case SpvOpTypeStruct:
3312 case SpvOpTypeOpaque:
3313 case SpvOpTypePointer:
3314 case SpvOpTypeFunction:
3315 case SpvOpTypeEvent:
3316 case SpvOpTypeDeviceEvent:
3317 case SpvOpTypeReserveId:
3318 case SpvOpTypeQueue:
3319 case SpvOpTypePipe:
3320 vtn_handle_type(b, opcode, w, count);
3321 break;
3322
3323 case SpvOpConstantTrue:
3324 case SpvOpConstantFalse:
3325 case SpvOpConstant:
3326 case SpvOpConstantComposite:
3327 case SpvOpConstantSampler:
3328 case SpvOpConstantNull:
3329 case SpvOpSpecConstantTrue:
3330 case SpvOpSpecConstantFalse:
3331 case SpvOpSpecConstant:
3332 case SpvOpSpecConstantComposite:
3333 case SpvOpSpecConstantOp:
3334 vtn_handle_constant(b, opcode, w, count);
3335 break;
3336
3337 case SpvOpUndef:
3338 case SpvOpVariable:
3339 vtn_handle_variables(b, opcode, w, count);
3340 break;
3341
3342 default:
3343 return false; /* End of preamble */
3344 }
3345
3346 return true;
3347 }
3348
3349 static bool
3350 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
3351 const uint32_t *w, unsigned count)
3352 {
3353 switch (opcode) {
3354 case SpvOpLabel:
3355 break;
3356
3357 case SpvOpLoopMerge:
3358 case SpvOpSelectionMerge:
3359 /* This is handled by cfg pre-pass and walk_blocks */
3360 break;
3361
3362 case SpvOpUndef: {
3363 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
3364 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
3365 break;
3366 }
3367
3368 case SpvOpExtInst:
3369 vtn_handle_extension(b, opcode, w, count);
3370 break;
3371
3372 case SpvOpVariable:
3373 case SpvOpLoad:
3374 case SpvOpStore:
3375 case SpvOpCopyMemory:
3376 case SpvOpCopyMemorySized:
3377 case SpvOpAccessChain:
3378 case SpvOpPtrAccessChain:
3379 case SpvOpInBoundsAccessChain:
3380 case SpvOpArrayLength:
3381 vtn_handle_variables(b, opcode, w, count);
3382 break;
3383
3384 case SpvOpFunctionCall:
3385 vtn_handle_function_call(b, opcode, w, count);
3386 break;
3387
3388 case SpvOpSampledImage:
3389 case SpvOpImage:
3390 case SpvOpImageSampleImplicitLod:
3391 case SpvOpImageSampleExplicitLod:
3392 case SpvOpImageSampleDrefImplicitLod:
3393 case SpvOpImageSampleDrefExplicitLod:
3394 case SpvOpImageSampleProjImplicitLod:
3395 case SpvOpImageSampleProjExplicitLod:
3396 case SpvOpImageSampleProjDrefImplicitLod:
3397 case SpvOpImageSampleProjDrefExplicitLod:
3398 case SpvOpImageFetch:
3399 case SpvOpImageGather:
3400 case SpvOpImageDrefGather:
3401 case SpvOpImageQuerySizeLod:
3402 case SpvOpImageQueryLod:
3403 case SpvOpImageQueryLevels:
3404 case SpvOpImageQuerySamples:
3405 vtn_handle_texture(b, opcode, w, count);
3406 break;
3407
3408 case SpvOpImageRead:
3409 case SpvOpImageWrite:
3410 case SpvOpImageTexelPointer:
3411 vtn_handle_image(b, opcode, w, count);
3412 break;
3413
3414 case SpvOpImageQuerySize: {
3415 struct vtn_pointer *image =
3416 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3417 if (image->mode == vtn_variable_mode_image) {
3418 vtn_handle_image(b, opcode, w, count);
3419 } else {
3420 vtn_assert(image->mode == vtn_variable_mode_sampler);
3421 vtn_handle_texture(b, opcode, w, count);
3422 }
3423 break;
3424 }
3425
3426 case SpvOpAtomicLoad:
3427 case SpvOpAtomicExchange:
3428 case SpvOpAtomicCompareExchange:
3429 case SpvOpAtomicCompareExchangeWeak:
3430 case SpvOpAtomicIIncrement:
3431 case SpvOpAtomicIDecrement:
3432 case SpvOpAtomicIAdd:
3433 case SpvOpAtomicISub:
3434 case SpvOpAtomicSMin:
3435 case SpvOpAtomicUMin:
3436 case SpvOpAtomicSMax:
3437 case SpvOpAtomicUMax:
3438 case SpvOpAtomicAnd:
3439 case SpvOpAtomicOr:
3440 case SpvOpAtomicXor: {
3441 struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
3442 if (pointer->value_type == vtn_value_type_image_pointer) {
3443 vtn_handle_image(b, opcode, w, count);
3444 } else {
3445 vtn_assert(pointer->value_type == vtn_value_type_pointer);
3446 vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
3447 }
3448 break;
3449 }
3450
3451 case SpvOpAtomicStore: {
3452 struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
3453 if (pointer->value_type == vtn_value_type_image_pointer) {
3454 vtn_handle_image(b, opcode, w, count);
3455 } else {
3456 vtn_assert(pointer->value_type == vtn_value_type_pointer);
3457 vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
3458 }
3459 break;
3460 }
3461
3462 case SpvOpSelect: {
3463 /* Handle OpSelect up-front here because it needs to be able to handle
3464 * pointers and not just regular vectors and scalars.
3465 */
3466 struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
3467 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
3468 ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
3469 vtn_ssa_value(b, w[4])->def,
3470 vtn_ssa_value(b, w[5])->def);
3471 vtn_push_ssa(b, w[2], res_type, ssa);
3472 break;
3473 }
3474
3475 case SpvOpSNegate:
3476 case SpvOpFNegate:
3477 case SpvOpNot:
3478 case SpvOpAny:
3479 case SpvOpAll:
3480 case SpvOpConvertFToU:
3481 case SpvOpConvertFToS:
3482 case SpvOpConvertSToF:
3483 case SpvOpConvertUToF:
3484 case SpvOpUConvert:
3485 case SpvOpSConvert:
3486 case SpvOpFConvert:
3487 case SpvOpQuantizeToF16:
3488 case SpvOpConvertPtrToU:
3489 case SpvOpConvertUToPtr:
3490 case SpvOpPtrCastToGeneric:
3491 case SpvOpGenericCastToPtr:
3492 case SpvOpBitcast:
3493 case SpvOpIsNan:
3494 case SpvOpIsInf:
3495 case SpvOpIsFinite:
3496 case SpvOpIsNormal:
3497 case SpvOpSignBitSet:
3498 case SpvOpLessOrGreater:
3499 case SpvOpOrdered:
3500 case SpvOpUnordered:
3501 case SpvOpIAdd:
3502 case SpvOpFAdd:
3503 case SpvOpISub:
3504 case SpvOpFSub:
3505 case SpvOpIMul:
3506 case SpvOpFMul:
3507 case SpvOpUDiv:
3508 case SpvOpSDiv:
3509 case SpvOpFDiv:
3510 case SpvOpUMod:
3511 case SpvOpSRem:
3512 case SpvOpSMod:
3513 case SpvOpFRem:
3514 case SpvOpFMod:
3515 case SpvOpVectorTimesScalar:
3516 case SpvOpDot:
3517 case SpvOpIAddCarry:
3518 case SpvOpISubBorrow:
3519 case SpvOpUMulExtended:
3520 case SpvOpSMulExtended:
3521 case SpvOpShiftRightLogical:
3522 case SpvOpShiftRightArithmetic:
3523 case SpvOpShiftLeftLogical:
3524 case SpvOpLogicalEqual:
3525 case SpvOpLogicalNotEqual:
3526 case SpvOpLogicalOr:
3527 case SpvOpLogicalAnd:
3528 case SpvOpLogicalNot:
3529 case SpvOpBitwiseOr:
3530 case SpvOpBitwiseXor:
3531 case SpvOpBitwiseAnd:
3532 case SpvOpIEqual:
3533 case SpvOpFOrdEqual:
3534 case SpvOpFUnordEqual:
3535 case SpvOpINotEqual:
3536 case SpvOpFOrdNotEqual:
3537 case SpvOpFUnordNotEqual:
3538 case SpvOpULessThan:
3539 case SpvOpSLessThan:
3540 case SpvOpFOrdLessThan:
3541 case SpvOpFUnordLessThan:
3542 case SpvOpUGreaterThan:
3543 case SpvOpSGreaterThan:
3544 case SpvOpFOrdGreaterThan:
3545 case SpvOpFUnordGreaterThan:
3546 case SpvOpULessThanEqual:
3547 case SpvOpSLessThanEqual:
3548 case SpvOpFOrdLessThanEqual:
3549 case SpvOpFUnordLessThanEqual:
3550 case SpvOpUGreaterThanEqual:
3551 case SpvOpSGreaterThanEqual:
3552 case SpvOpFOrdGreaterThanEqual:
3553 case SpvOpFUnordGreaterThanEqual:
3554 case SpvOpDPdx:
3555 case SpvOpDPdy:
3556 case SpvOpFwidth:
3557 case SpvOpDPdxFine:
3558 case SpvOpDPdyFine:
3559 case SpvOpFwidthFine:
3560 case SpvOpDPdxCoarse:
3561 case SpvOpDPdyCoarse:
3562 case SpvOpFwidthCoarse:
3563 case SpvOpBitFieldInsert:
3564 case SpvOpBitFieldSExtract:
3565 case SpvOpBitFieldUExtract:
3566 case SpvOpBitReverse:
3567 case SpvOpBitCount:
3568 case SpvOpTranspose:
3569 case SpvOpOuterProduct:
3570 case SpvOpMatrixTimesScalar:
3571 case SpvOpVectorTimesMatrix:
3572 case SpvOpMatrixTimesVector:
3573 case SpvOpMatrixTimesMatrix:
3574 vtn_handle_alu(b, opcode, w, count);
3575 break;
3576
3577 case SpvOpVectorExtractDynamic:
3578 case SpvOpVectorInsertDynamic:
3579 case SpvOpVectorShuffle:
3580 case SpvOpCompositeConstruct:
3581 case SpvOpCompositeExtract:
3582 case SpvOpCompositeInsert:
3583 case SpvOpCopyObject:
3584 vtn_handle_composite(b, opcode, w, count);
3585 break;
3586
3587 case SpvOpEmitVertex:
3588 case SpvOpEndPrimitive:
3589 case SpvOpEmitStreamVertex:
3590 case SpvOpEndStreamPrimitive:
3591 case SpvOpControlBarrier:
3592 case SpvOpMemoryBarrier:
3593 vtn_handle_barrier(b, opcode, w, count);
3594 break;
3595
3596 default:
3597 vtn_fail("Unhandled opcode");
3598 }
3599
3600 return true;
3601 }
3602
3603 nir_function *
3604 spirv_to_nir(const uint32_t *words, size_t word_count,
3605 struct nir_spirv_specialization *spec, unsigned num_spec,
3606 gl_shader_stage stage, const char *entry_point_name,
3607 const struct spirv_to_nir_options *options,
3608 const nir_shader_compiler_options *nir_options)
3609 {
3610    /* Initialize the vtn_builder object */
3611 struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
3612 b->spirv = words;
3613 b->file = NULL;
3614 b->line = -1;
3615 b->col = -1;
3616 exec_list_make_empty(&b->functions);
3617 b->entry_point_stage = stage;
3618 b->entry_point_name = entry_point_name;
3619 b->options = options;
3620
3621 /* See also _vtn_fail() */
3622 if (setjmp(b->fail_jump)) {
3623 ralloc_free(b);
3624 return NULL;
3625 }
3626
3627 const uint32_t *word_end = words + word_count;
3628
3629    /* Handle the SPIR-V header (first 5 dwords) */
3630 vtn_assert(word_count > 5);
3631
3632 vtn_assert(words[0] == SpvMagicNumber);
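        /* words[1] holds the SPIR-V version; 0x10000 corresponds to version 1.0. */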
3633 vtn_assert(words[1] >= 0x10000);
3634 /* words[2] == generator magic */
3635 unsigned value_id_bound = words[3];
3636 vtn_assert(words[4] == 0);
3637
3638    words += 5;
3639
3640 b->value_id_bound = value_id_bound;
3641 b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
3642
3643 /* Handle all the preamble instructions */
3644 words = vtn_foreach_instruction(b, words, word_end,
3645 vtn_handle_preamble_instruction);
3646
3647 if (b->entry_point == NULL) {
3648 vtn_fail("Entry point not found");
3649 ralloc_free(b);
3650 return NULL;
3651 }
3652
3653 b->shader = nir_shader_create(b, stage, nir_options, NULL);
3654
3655 /* Set shader info defaults */
3656 b->shader->info.gs.invocations = 1;
3657
3658 /* Parse execution modes */
3659 vtn_foreach_execution_mode(b, b->entry_point,
3660 vtn_handle_execution_mode, NULL);
3661
3662 b->specializations = spec;
3663 b->num_specializations = num_spec;
3664
3665 /* Handle all variable, type, and constant instructions */
3666 words = vtn_foreach_instruction(b, words, word_end,
3667 vtn_handle_variable_or_type_instruction);
3668
3669 vtn_build_cfg(b, words, word_end);
3670
3671 assert(b->entry_point->value_type == vtn_value_type_function);
3672 b->entry_point->func->referenced = true;
3673
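        /* Functions are emitted on demand: emitting one function may cause
         * more functions to become referenced, so keep iterating until a
         * full pass emits nothing new.
         */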
3674 bool progress;
3675 do {
3676 progress = false;
3677 foreach_list_typed(struct vtn_function, func, node, &b->functions) {
3678 if (func->referenced && !func->emitted) {
3679 b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
3680 _mesa_key_pointer_equal);
3681
3682 vtn_function_emit(b, func, vtn_handle_body_instruction);
3683 progress = true;
3684 }
3685 }
3686 } while (progress);
3687
3688 vtn_assert(b->entry_point->value_type == vtn_value_type_function);
3689 nir_function *entry_point = b->entry_point->func->impl->function;
3690 vtn_assert(entry_point);
3691
3692 /* Unparent the shader from the vtn_builder before we delete the builder */
3693 ralloc_steal(NULL, b->shader);
3694
3695 ralloc_free(b);
3696
3697 return entry_point;
3698 }